/* Copyright (C) 2000-2012 by George Williams */
/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <fontforge-config.h>

#include "gdraw.h"
#include "ggadgetP.h"
#include "gkeysym.h"
#include "gresource.h"
#include "gwidget.h"
#include "ustring.h"
#include "utype.h"

#include <math.h>

extern void (*_GDraw_InsCharHook)(GDisplay *,unichar_t);

GBox _GGadget_gtextfield_box = GBOX_EMPTY; /* Don't initialize here */
static GBox glistfield_box = GBOX_EMPTY; /* Don't initialize here */
static GBox glistfieldmenu_box = GBOX_EMPTY; /* Don't initialize here */
static GBox gnumericfield_box = GBOX_EMPTY; /* Don't initialize here */
static GBox gnumericfieldspinner_box = GBOX_EMPTY; /* Don't initialize here */
FontInstance *_gtextfield_font = NULL;
static int gtextfield_inited = false;

static GResInfo listfield_ri, listfieldmenu_ri, numericfield_ri, numericfieldspinner_ri;

static GTextInfo text_lab[] = {
    { (unichar_t *) "Disabled", NULL, 0, 0, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 0, 0, '\0' },
    { (unichar_t *) "Enabled" , NULL, 0, 0, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 0, 0, '\0' }
};
static GTextInfo list_choices[] = {
    { (unichar_t *) "1", NULL, 0, 0, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 0, 0, '\0' },
    { (unichar_t *) "2", NULL, 0, 0, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 0, 0, '\0' },
    { (unichar_t *) "3", NULL, 0, 0, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 0, 0, '\0' },
    GTEXTINFO_EMPTY
};
static GGadgetCreateData text_gcd[] = {
    { GTextFieldCreate, { { 0, 0, 70, 0 }, NULL, 0, 0, 0, 0, 0, &text_lab[0], { NULL }, gg_visible, NULL, NULL }, NULL, NULL },
    { GTextFieldCreate, { { 0, 0, 70, 0 }, NULL, 0, 0, 0, 0, 0, &text_lab[1], { NULL }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL }
};
static GGadgetCreateData *tarray[] = { GCD_Glue, &text_gcd[0], GCD_Glue, &text_gcd[1], GCD_Glue, NULL, NULL };
static GGadgetCreateData textbox =
    { GHVGroupCreate, { { 2, 2, 0, 0 }, NULL, 0, 0, 0, 0, 0, NULL, { (GTextInfo *) tarray }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL };
static GResInfo gtextfield_ri = {
    &listfield_ri, &ggadget_ri,NULL, NULL,
    &_GGadget_gtextfield_box,
    &_gtextfield_font,
    &textbox,
    NULL,
    N_("Text Field"),
    N_("Text Field"),
    "GTextField",
    "Gdraw",
    false,
    omf_font|omf_padding,
    NULL,
    GBOX_EMPTY,
    NULL,
    NULL,
    NULL
};
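/* Illustrative only: a minimal sketch (not part of the original file) of how a
 * dialog typically instantiates one of these text fields.  It follows the
 * GGadgetCreateData conventions shown in the resource-editor samples above and
 * the usual GGadgetsCreate() entry point; ExampleDialog and its literals are
 * hypothetical. */
#if 0
static void ExampleDialog(GWindow gw) {
    GGadgetCreateData gcd[2];
    GTextInfo label[1];

    memset(gcd,0,sizeof(gcd));
    memset(label,0,sizeof(label));
    label[0].text = (unichar_t *) "Initial text";
    label[0].text_is_1byte = true;		/* the text above is char *, not unichar_t * */
    gcd[0].gd.pos.x = 10; gcd[0].gd.pos.y = 10; gcd[0].gd.pos.width = 140;
    gcd[0].gd.label = &label[0];
    gcd[0].gd.flags = gg_visible|gg_enabled;
    gcd[0].creator = GTextFieldCreate;		/* gcd[1] stays zeroed as the terminator */

    GGadgetsCreate(gw,gcd);
}
#endif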
static GGadgetCreateData textlist_gcd[] = {
    { GListFieldCreate, { GRECT_EMPTY, NULL, 0, 0, 0, 0, 0, &text_lab[0], { list_choices }, gg_visible, NULL, NULL }, NULL, NULL },
    { GListFieldCreate, { GRECT_EMPTY, NULL, 0, 0, 0, 0, 0, &text_lab[1], { list_choices }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL }
};
static GGadgetCreateData *tlarray[] = { GCD_Glue, &textlist_gcd[0], GCD_Glue, &textlist_gcd[1], GCD_Glue, NULL, NULL };
static GGadgetCreateData textlistbox =
    { GHVGroupCreate, { { 2, 2, 0, 0 }, NULL, 0, 0, 0, 0, 0, NULL, { (GTextInfo *) tlarray }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL };
static GResInfo listfield_ri = {
    &listfieldmenu_ri, &gtextfield_ri,&listfieldmenu_ri, &listmark_ri,
    &glistfield_box,
    NULL,
    &textlistbox,
    NULL,
    N_("List Field"),
    N_("List Field (Combo Box)"),
    "GComboBox",
    "Gdraw",
    false,
    0,
    NULL,
    GBOX_EMPTY,
    NULL,
    NULL,
    NULL
};
static GResInfo listfieldmenu_ri = {
    &numericfield_ri, &listfield_ri, &listmark_ri,NULL,
    &glistfieldmenu_box,
    NULL,
    &textlistbox,
    NULL,
    N_("List Field Menu"),
    N_("Box surrounding the ListMark in a list field (combobox)"),
    "GComboBoxMenu",
    "Gdraw",
    false,
    omf_padding,
    NULL,
    GBOX_EMPTY,
    NULL,
    NULL,
    NULL
};
static GGadgetCreateData num_gcd[] = {
    { GNumericFieldCreate, { { 0, 0, 50, 0 }, NULL, 0, 0, 0, 0, 0, &list_choices[0], { NULL }, gg_visible, NULL, NULL }, NULL, NULL },
    { GNumericFieldCreate, { { 0, 0, 50, 0 }, NULL, 0, 0, 0, 0, 0, &list_choices[0], { NULL }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL }
};
static GGadgetCreateData *narray[] = { GCD_Glue, &num_gcd[0], GCD_Glue, &num_gcd[1], GCD_Glue, NULL, NULL };
static GGadgetCreateData numbox =
    { GHVGroupCreate, { { 2, 2, 0, 0 }, NULL, 0, 0, 0, 0, 0, NULL, { (GTextInfo *) narray }, gg_visible|gg_enabled, NULL, NULL }, NULL, NULL };
static GResInfo numericfield_ri = {
    &numericfieldspinner_ri, &gtextfield_ri,&numericfieldspinner_ri, NULL,
    &gnumericfield_box,
    NULL,
    &numbox,
    NULL,
    N_("Numeric Field"),
    N_("Numeric Field (Spinner)"),
    "GNumericField",
    "Gdraw",
    false,
    0,
    NULL,
    GBOX_EMPTY,
    NULL,
    NULL,
    NULL
};
static GResInfo numericfieldspinner_ri = {
    NULL, &numericfield_ri,NULL, NULL,
    &gnumericfieldspinner_box,
    NULL,
    &numbox,
    NULL,
    N_("Numeric Field Sign"),
    N_("The box around the up/down arrows of a numeric field (spinner)"),
    "GNumericFieldSpinner",
    "Gdraw",
    false,
    omf_border_type|omf_border_width|omf_padding,
    NULL,
    GBOX_EMPTY,
    NULL,
    NULL,
    NULL
};

static unichar_t nullstr[] = { 0 },
	nstr[] = { 'n', 0 },
	newlinestr[] = { '\n', 0 }, tabstr[] = { '\t', 0 };

static void GListFieldSelected(GGadget *g, int i);
static int GTextField_Show(GTextField *gt, int pos);
static void GTPositionGIC(GTextField *gt);
static void GCompletionDestroy(GCompletionField *gc);
static void GTextFieldComplete(GTextField *gt,int from_tab);
static int GCompletionHandleKey(GTextField *gt,GEvent *event);

/* Convert a character (unichar_t) position into the byte offset of the same
 * character in the utf8 representation of the string */
static int u2utf8_index(int pos,const char *start) {
    const char *pt = start;

    while ( --pos>=0 )
	utf8_ildb(&pt);
return( pt-start );
}

/* Inverse of the above: convert a utf8 byte offset into a character count */
static int utf82u_index(int pos, const char *start) {
    int uc = 0;
    const char *end = start+pos;

    while ( start<end ) {
	utf8_ildb(&start);
	++uc;
    }
return( uc );
}

static void GTextFieldChanged(GTextField *gt,int src) {
    GEvent e;

    e.type = et_controlevent;
    e.w = gt->g.base;
    e.u.control.subtype = et_textchanged;
    e.u.control.g = &gt->g;
    e.u.control.u.tf_changed.from_pulldown = src;
    if ( gt->g.handle_controlevent != NULL )
	(gt->g.handle_controlevent)(&gt->g,&e);
    else
	GDrawPostEvent(&e);
}

static void GTextFieldFocusChanged(GTextField *gt,int gained) {
    GEvent e;

    if ( (gt->g.box->flags &
box_active_border_inner) && ( gt->g.state==gs_enabled || gt->g.state==gs_active )) { int state = gained?gs_active:gs_enabled; if ( state!=gt->g.state ) { gt->g.state = state; GGadgetRedraw((GGadget *) gt); } } e.type = et_controlevent; e.w = gt->g.base; e.u.control.subtype = et_textfocuschanged; e.u.control.g = &gt->g; e.u.control.u.tf_focus.gained_focus = gained; if ( gt->g.handle_controlevent != NULL ) (gt->g.handle_controlevent)(&gt->g,&e); else GDrawPostEvent(&e); } static void GTextFieldPangoRefigureLines(GTextField *gt, int start_of_change) { char *utf8_text, *pt, *ept; unichar_t *upt, *uept; int i, uc; GRect size; free(gt->utf8_text); if ( gt->lines8==NULL ) { gt->lines8 = malloc(gt->lmax*sizeof(int32)); gt->lines8[0] = 0; gt->lines8[1] = -1; } if ( gt->password ) { int cnt = u_strlen(gt->text); utf8_text = malloc(cnt+1); memset(utf8_text,'*',cnt); utf8_text[cnt] = '\0'; } else utf8_text = u2utf8_copy(gt->text); gt->utf8_text = utf8_text; GDrawLayoutInit(gt->g.base,utf8_text,-1,NULL); if ( !gt->multi_line ) { GDrawLayoutExtents(gt->g.base,&size); gt->xmax = size.width; return; } if ( !gt->wrap ) { pt = utf8_text; i=0; while ( ( ept = strchr(pt,'\n'))!=NULL ) { if ( i>=gt->lmax ) { gt->lines8 = realloc(gt->lines8,(gt->lmax+=10)*sizeof(int32)); gt->lines = realloc(gt->lines,gt->lmax*sizeof(int32)); } gt->lines8[i++] = pt-utf8_text; pt = ept+1; } if ( i>=gt->lmax ) { gt->lines8 = realloc(gt->lines8,(gt->lmax+=10)*sizeof(int32)); gt->lines = realloc(gt->lines,gt->lmax*sizeof(int32)); } gt->lines8[i++] = pt-utf8_text; upt = gt->text; i = 0; while ( ( uept = u_strchr(upt,'\n'))!=NULL ) { gt->lines[i++] = upt-gt->text; upt = uept+1; } gt->lines[i++] = upt-gt->text; } else { int lcnt; GDrawLayoutSetWidth(gt->g.base,gt->g.inner.width); lcnt = GDrawLayoutLineCount(gt->g.base); if ( lcnt+2>=gt->lmax ) { gt->lines8 = realloc(gt->lines8,(gt->lmax=lcnt+10)*sizeof(int32)); gt->lines = realloc(gt->lines,gt->lmax*sizeof(int32)); } pt = utf8_text; uc=0; for ( i=0; i<lcnt; ++i ) { gt->lines8[i] = GDrawLayoutLineStart(gt->g.base,i); ept = utf8_text + gt->lines8[i]; while ( pt<ept ) { ++uc; utf8_ildb((const char **) &pt); } gt->lines[i] = uc; } if ( i==0 ) { gt->lines8[i] = strlen(utf8_text); gt->lines[i] = u_strlen(gt->text); } else { gt->lines8[i] = gt->lines8[i-1] + strlen( utf8_text + gt->lines8[i-1]); gt->lines [i] = gt->lines [i-1] + u_strlen( gt->text + gt->lines [i-1]); } } if ( gt->lcnt!=i ) { gt->lcnt = i; if ( gt->vsb!=NULL ) GScrollBarSetBounds(&gt->vsb->g,0,gt->lcnt, gt->g.inner.height<gt->fh? 1 : gt->g.inner.height/gt->fh); if ( gt->loff_top+gt->g.inner.height/gt->fh>gt->lcnt ) { gt->loff_top = gt->lcnt-gt->g.inner.height/gt->fh; if ( gt->loff_top<0 ) gt->loff_top = 0; if ( gt->vsb!=NULL ) GScrollBarSetPos(&gt->vsb->g,gt->loff_top); } } if ( i>=gt->lmax ) gt->lines = realloc(gt->lines,(gt->lmax+=10)*sizeof(int32)); gt->lines8[i] = -1; gt->lines[i++] = -1; GDrawLayoutExtents(gt->g.base,&size); gt->xmax = size.width; if ( gt->hsb!=NULL ) { GScrollBarSetBounds(&gt->hsb->g,0,gt->xmax,gt->g.inner.width); } GDrawLayoutSetWidth(gt->g.base,-1); } static void GTextFieldRefigureLines(GTextField *gt, int start_of_change) { GDrawSetFont(gt->g.base,gt->font); if ( gt->lines==NULL ) { gt->lines = malloc(10*sizeof(int32)); gt->lines[0] = 0; gt->lines[1] = -1; gt->lmax = 10; gt->lcnt = 1; if ( gt->vsb!=NULL ) GScrollBarSetBounds(&gt->vsb->g,0,gt->lcnt, gt->g.inner.height<gt->fh ? 
1 : gt->g.inner.height/gt->fh); } GTextFieldPangoRefigureLines(gt,start_of_change); return; } static void _GTextFieldReplace(GTextField *gt, const unichar_t *str) { unichar_t *old = gt->oldtext; unichar_t *new = malloc((u_strlen(gt->text)-(gt->sel_end-gt->sel_start) + u_strlen(str)+1)*sizeof(unichar_t)); gt->oldtext = gt->text; gt->sel_oldstart = gt->sel_start; gt->sel_oldend = gt->sel_end; gt->sel_oldbase = gt->sel_base; u_strncpy(new,gt->text,gt->sel_start); u_strcpy(new+gt->sel_start,str); gt->sel_start = u_strlen(new); u_strcpy(new+gt->sel_start,gt->text+gt->sel_end); gt->text = new; gt->sel_end = gt->sel_base = gt->sel_start; free(old); GTextFieldRefigureLines(gt,gt->sel_oldstart); } static void GTextField_Replace(GTextField *gt, const unichar_t *str) { _GTextFieldReplace(gt,str); GTextField_Show(gt,gt->sel_start); } static int GTextFieldFindLine(GTextField *gt, int pos) { int i; for ( i=0; gt->lines[i+1]!=-1; ++i ) if ( pos<gt->lines[i+1]) break; return( i ); } static unichar_t *GTextFieldGetPtFromPos(GTextField *gt,int i,int xpos) { int ll; unichar_t *end; ll = gt->lines[i+1]==-1?-1:gt->lines[i+1]-gt->lines[i]-1; int index8, uc; if ( gt->lines8[i+1]==-1 ) GDrawLayoutInit(gt->g.base,gt->utf8_text + gt->lines8[i],-1,NULL); else { GDrawLayoutInit(gt->g.base,gt->utf8_text + gt->lines8[i], gt->lines8[i+1]-gt->lines8[i], NULL); } index8 = GDrawLayoutXYToIndex(gt->g.base,xpos-gt->g.inner.x+gt->xoff_left,0); uc = utf82u_index(index8,gt->utf8_text + gt->lines8[i]); end = gt->text + gt->lines[i] + uc; return( end ); } static int GTextField_Show(GTextField *gt, int pos) { int i, ll, xoff, loff; int refresh=false; GListField *ge = (GListField *) gt; int width = gt->g.inner.width; if ( gt->listfield || gt->numericfield ) width = ge->fieldrect.width - 2*(gt->g.inner.x - gt->g.r.x); if ( pos < 0 ) pos = 0; if ( pos > u_strlen(gt->text)) pos = u_strlen(gt->text); i = GTextFieldFindLine(gt,pos); loff = gt->loff_top; if ( gt->lcnt<gt->g.inner.height/gt->fh || loff==0 ) loff = 0; if ( i<loff ) loff = i; if ( i>=loff+gt->g.inner.height/gt->fh ) { loff = i-(gt->g.inner.height/gt->fh); if ( gt->g.inner.height/gt->fh>2 ) ++loff; } xoff = gt->xoff_left; if ( gt->lines[i+1]==-1 ) ll = -1; else ll = gt->lines[i+1]-gt->lines[i]-1; GRect size; if ( gt->lines8[i+1]==-1 ) ll = strlen(gt->utf8_text+gt->lines8[i]); else ll = gt->lines8[i+1]-gt->lines8[i]-1; GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[i],ll,NULL); GDrawLayoutExtents(gt->g.base,&size); if ( size.width < width ) xoff = 0; else { int index8 = u2utf8_index(pos- gt->lines8[i],gt->utf8_text + gt->lines8[i]); GDrawLayoutIndexToPos(gt->g.base,index8,&size); if ( size.x + 2*size.width < width ) xoff = 0; else xoff = size.x - (width - size.width)/2; if ( xoff<0 ) xoff = 0; } if ( xoff!=gt->xoff_left ) { gt->xoff_left = xoff; if ( gt->hsb!=NULL ) GScrollBarSetPos(&gt->hsb->g,xoff); refresh = true; } if ( loff!=gt->loff_top ) { gt->loff_top = loff; if ( gt->vsb!=NULL ) GScrollBarSetPos(&gt->vsb->g,loff); refresh = true; } GTPositionGIC(gt); return( refresh ); } static void *genunicodedata(void *_gt,int32 *len) { GTextField *gt = _gt; unichar_t *temp; *len = gt->sel_end-gt->sel_start + 1; temp = malloc((*len+2)*sizeof(unichar_t)); temp[0] = 0xfeff; /* KDE expects a byte order flag */ u_strncpy(temp+1,gt->text+gt->sel_start,gt->sel_end-gt->sel_start); temp[*len+1] = 0; return( temp ); } static void *genutf8data(void *_gt,int32 *len) { GTextField *gt = _gt; unichar_t *temp =u_copyn(gt->text+gt->sel_start,gt->sel_end-gt->sel_start); char *ret = 
u2utf8_copy(temp); free(temp); *len = strlen(ret); return( ret ); } static void *ddgenunicodedata(void *_gt,int32 *len) { void *temp = genunicodedata(_gt,len); GTextField *gt = _gt; _GTextFieldReplace(gt,nullstr); _ggadget_redraw(&gt->g); return( temp ); } static void *genlocaldata(void *_gt,int32 *len) { GTextField *gt = _gt; unichar_t *temp =u_copyn(gt->text+gt->sel_start,gt->sel_end-gt->sel_start); char *ret = u2def_copy(temp); free(temp); *len = strlen(ret); return( ret ); } static void *ddgenlocaldata(void *_gt,int32 *len) { void *temp = genlocaldata(_gt,len); GTextField *gt = _gt; _GTextFieldReplace(gt,nullstr); _ggadget_redraw(&gt->g); return( temp ); } static void noop(void *_gt) { } static void GTextFieldGrabPrimarySelection(GTextField *gt) { int ss = gt->sel_start, se = gt->sel_end; GDrawGrabSelection(gt->g.base,sn_primary); gt->sel_start = ss; gt->sel_end = se; GDrawAddSelectionType(gt->g.base,sn_primary,"text/plain;charset=ISO-10646-UCS-4",gt,gt->sel_end-gt->sel_start, sizeof(unichar_t), genunicodedata,noop); GDrawAddSelectionType(gt->g.base,sn_primary,"UTF8_STRING",gt,gt->sel_end-gt->sel_start, sizeof(char), genutf8data,noop); GDrawAddSelectionType(gt->g.base,sn_primary,"text/plain;charset=UTF-8",gt,gt->sel_end-gt->sel_start, sizeof(char), genutf8data,noop); GDrawAddSelectionType(gt->g.base,sn_primary,"STRING",gt,gt->sel_end-gt->sel_start, sizeof(char), genlocaldata,noop); } static void GTextFieldGrabDDSelection(GTextField *gt) { GDrawGrabSelection(gt->g.base,sn_drag_and_drop); GDrawAddSelectionType(gt->g.base,sn_drag_and_drop,"text/plain;charset=ISO-10646-UCS-4",gt,gt->sel_end-gt->sel_start, sizeof(unichar_t), ddgenunicodedata,noop); GDrawAddSelectionType(gt->g.base,sn_drag_and_drop,"STRING",gt,gt->sel_end-gt->sel_start,sizeof(char), ddgenlocaldata,noop); } static void GTextFieldGrabSelection(GTextField *gt, enum selnames sel ) { if ( gt->sel_start!=gt->sel_end ) { unichar_t *temp; char *ctemp, *ctemp2; int i; uint16 *u2temp; GDrawGrabSelection(gt->g.base,sel); temp = malloc((gt->sel_end-gt->sel_start + 2)*sizeof(unichar_t)); temp[0] = 0xfeff; /* KDE expects a byte order flag */ u_strncpy(temp+1,gt->text+gt->sel_start,gt->sel_end-gt->sel_start); ctemp = u2utf8_copy(temp+1); ctemp2 = u2def_copy(temp+1); GDrawAddSelectionType(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-4",temp,u_strlen(temp), sizeof(unichar_t), NULL,NULL); u2temp = malloc((gt->sel_end-gt->sel_start + 2)*sizeof(uint16)); for ( i=0; temp[i]!=0; ++i ) u2temp[i] = temp[i]; u2temp[i] = 0; GDrawAddSelectionType(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-2",u2temp,u_strlen(temp), 2, NULL,NULL); GDrawAddSelectionType(gt->g.base,sel,"UTF8_STRING",copy(ctemp),strlen(ctemp), sizeof(char), NULL,NULL); GDrawAddSelectionType(gt->g.base,sel,"text/plain;charset=UTF-8",ctemp,strlen(ctemp), sizeof(char), NULL,NULL); if ( ctemp2!=NULL && *ctemp2!='\0' /*strlen(ctemp2)==gt->sel_end-gt->sel_start*/ ) GDrawAddSelectionType(gt->g.base,sel,"STRING",ctemp2,strlen(ctemp2), sizeof(char), NULL,NULL); else free(ctemp2); } } static int GTextFieldSelBackword(unichar_t *text,int start) { unichar_t ch = text[start-1]; if ( start==0 ) /* Can't go back */; else if ( isalnum(ch) || ch=='_' ) { int i; for ( i=start-1; i>=0 && ((text[i]<0x10000 && isalnum(text[i])) || text[i]=='_') ; --i ); start = i+1; } else { int i; for ( i=start-1; i>=0 && !(text[i]<0x10000 && isalnum(text[i])) && text[i]!='_' ; --i ); start = i+1; } return( start ); } static int GTextFieldSelForeword(unichar_t *text,int end) { unichar_t ch = text[end]; if ( ch=='\0' 
) /* Nothing */; else if ( isalnum(ch) || ch=='_' ) { int i; for ( i=end; (text[i]<0x10000 && isalnum(text[i])) || text[i]=='_' ; ++i ); end = i; } else { int i; for ( i=end; !(text[i]<0x10000 && isalnum(text[i])) && text[i]!='_' && text[i]!='\0' ; ++i ); end = i; } return( end ); } static void GTextFieldSelectWord(GTextField *gt,int mid, int16 *start, int16 *end) { unichar_t *text; unichar_t ch = gt->text[mid]; text = gt->text; ch = text[mid]; if ( ch=='\0' ) *start = *end = mid; else if ( (ch<0x10000 && isspace(ch)) ) { int i; for ( i=mid; text[i]<0x10000 && isspace(text[i]); ++i ); *end = i; for ( i=mid-1; i>=0 && text[i]<0x10000 && isspace(text[i]) ; --i ); *start = i+1; } else if ( (ch<0x10000 && isalnum(ch)) || ch=='_' ) { int i; for ( i=mid; (text[i]<0x10000 && isalnum(text[i])) || text[i]=='_' ; ++i ); *end = i; for ( i=mid-1; i>=0 && ((text[i]<0x10000 && isalnum(text[i])) || text[i]=='_') ; --i ); *start = i+1; } else { int i; for ( i=mid; !(text[i]<0x10000 && isalnum(text[i])) && text[i]!='_' && text[i]!='\0' ; ++i ); *end = i; for ( i=mid-1; i>=0 && !(text[i]<0x10000 && isalnum(text[i])) && text[i]!='_' ; --i ); *start = i+1; } } static void GTextFieldSelectWords(GTextField *gt,int last) { int16 ss, se; GTextFieldSelectWord(gt,gt->sel_base,&gt->sel_start,&gt->sel_end); if ( last!=gt->sel_base ) { GTextFieldSelectWord(gt,last,&ss,&se); if ( ss<gt->sel_start ) gt->sel_start = ss; if ( se>gt->sel_end ) gt->sel_end = se; } } static void GTextFieldPaste(GTextField *gt,enum selnames sel) { if ( GDrawSelectionHasType(gt->g.base,sel,"UTF8_STRING") || GDrawSelectionHasType(gt->g.base,sel,"text/plain;charset=UTF-8")) { unichar_t *temp; char *ctemp; int32 len; ctemp = GDrawRequestSelection(gt->g.base,sel,"UTF8_STRING",&len); if ( ctemp==NULL || len==0 ) ctemp = GDrawRequestSelection(gt->g.base,sel,"text/plain;charset=UTF-8",&len); if ( ctemp!=NULL ) { temp = utf82u_copyn(ctemp,strlen(ctemp)); GTextField_Replace(gt,temp); free(ctemp); free(temp); } /* Bug in the xorg library on 64 bit machines and 32 bit transfers don't work */ /* so avoid them, by looking for utf8 first */ } else if ( GDrawSelectionHasType(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-4")) { unichar_t *temp; int32 len; temp = GDrawRequestSelection(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-4",&len); /* Bug! I don't handle byte reversed selections. But I don't think there should be any anyway... 
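   (byte order only matters for the UCS-2/UCS-4 transfer paths; the UTF-8 and 8-bit paths are unaffected)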
*/ if ( temp!=NULL ) GTextField_Replace(gt,temp[0]==0xfeff?temp+1:temp); free(temp); } else if ( GDrawSelectionHasType(gt->g.base,sel,"Unicode") || GDrawSelectionHasType(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-2")) { unichar_t *temp; uint16 *temp2; int32 len; temp2 = GDrawRequestSelection(gt->g.base,sel,"text/plain;charset=ISO-10646-UCS-2",&len); if ( temp2==NULL || len==0 ) temp2 = GDrawRequestSelection(gt->g.base,sel,"Unicode",&len); if ( temp2!=NULL ) { int i; temp = malloc((len/2+1)*sizeof(unichar_t)); for ( i=0; temp2[i]!=0; ++i ) temp[i] = temp2[i]; temp[i] = 0; GTextField_Replace(gt,temp[0]==0xfeff?temp+1:temp); free(temp); } free(temp2); } else if ( GDrawSelectionHasType(gt->g.base,sel,"STRING")) { unichar_t *temp; char *ctemp; int32 len; ctemp = GDrawRequestSelection(gt->g.base,sel,"STRING",&len); if ( ctemp==NULL || len==0 ) ctemp = GDrawRequestSelection(gt->g.base,sel,"text/plain;charset=UTF-8",&len); if ( ctemp!=NULL ) { temp = def2u_copy(ctemp); GTextField_Replace(gt,temp); free(ctemp); free(temp); } } } static int gtextfield_editcmd(GGadget *g,enum editor_commands cmd) { GTextField *gt = (GTextField *) g; switch ( cmd ) { case ec_selectall: gt->sel_start = 0; gt->sel_end = u_strlen(gt->text); return( true ); case ec_clear: GTextField_Replace(gt,nullstr); _ggadget_redraw(g); return( true ); case ec_cut: GTextFieldGrabSelection(gt,sn_clipboard); GTextField_Replace(gt,nullstr); _ggadget_redraw(g); break; case ec_copy: GTextFieldGrabSelection(gt,sn_clipboard); return( true ); case ec_paste: GTextFieldPaste(gt,sn_clipboard); GTextField_Show(gt,gt->sel_start); _ggadget_redraw(g); break; case ec_undo: if ( gt->oldtext!=NULL ) { unichar_t *temp = gt->text; int16 s; gt->text = gt->oldtext; gt->oldtext = temp; s = gt->sel_start; gt->sel_start = gt->sel_oldstart; gt->sel_oldstart = s; s = gt->sel_end; gt->sel_end = gt->sel_oldend; gt->sel_oldend = s; s = gt->sel_base; gt->sel_base = gt->sel_oldbase; gt->sel_oldbase = s; GTextFieldRefigureLines(gt, 0); GTextField_Show(gt,gt->sel_end); _ggadget_redraw(g); } break; case ec_redo: /* Hmm. 
not sure */
	/* we don't do anything */
return( true );			/* but probably best to return success */
      case ec_backword:
	if ( gt->sel_start==gt->sel_end && gt->sel_start!=0 ) {
	    gt->sel_start = GTextFieldSelBackword(gt->text,gt->sel_start);
	}
	GTextField_Replace(gt,nullstr);
	_ggadget_redraw(g);
      break;
      case ec_deleteword:
	if ( gt->sel_start==gt->sel_end && gt->sel_start!=0 )
	    GTextFieldSelectWord(gt,gt->sel_start,&gt->sel_start,&gt->sel_end);
	GTextField_Replace(gt,nullstr);
	_ggadget_redraw(g);
      break;
      default:
return( false );
    }
    GTextFieldChanged(gt,-1);
return( true );
}

static int _gtextfield_editcmd(GGadget *g,enum editor_commands cmd) {
    if ( gtextfield_editcmd(g,cmd)) {
	_ggadget_redraw(g);
	GTPositionGIC((GTextField *) g);
return( true );
    }
return( false );
}

static int GTBackPos(GTextField *gt,int pos, int ismeta) {
    int newpos;

    if ( ismeta )
	newpos = GTextFieldSelBackword(gt->text,pos);
    else
	newpos = pos-1;
    if ( newpos==-1 ) newpos = pos;
return( newpos );
}

static int GTForePos(GTextField *gt,int pos, int ismeta) {
    int newpos=pos;

    if ( ismeta )
	newpos = GTextFieldSelForeword(gt->text,pos);
    else {
	if ( gt->text[pos]!=0 )
	    newpos = pos+1;
    }
return( newpos );
}

unichar_t *_GGadgetFileToUString(char *filename,int max) {
    FILE *file;
    int ch, ch2, ch3;
    int format=0;
    unichar_t *space, *upt, *end;

    file = fopen( filename,"r" );
    if ( file==NULL )
return( NULL );
    ch = getc(file); ch2 = getc(file); ch3 = getc(file);
    ungetc(ch3,file);
    if ( ch==0xfe && ch2==0xff )
	format = 1;		/* normal ucs2 */
    else if ( ch==0xff && ch2==0xfe )
	format = 2;		/* byte-swapped ucs2 */
    else if ( ch==0xef && ch2==0xbb && ch3==0xbf ) {
	format = 3;		/* utf8 */
	getc(file);
    } else {
	getc(file);		/* rewind probably undoes the ungetc, but let's not depend on it */
	rewind(file);
    }
    space = upt = malloc((max+1)*sizeof(unichar_t));
    end = space+max;
    if ( format==3 ) {		/* utf8 */
	while ( upt<end ) {
	    ch=getc(file);
	    if ( ch==EOF )
	break;
	    if ( ch<0x80 )
		*upt++ = ch;
	    else if ( ch<0xe0 ) {
		ch2 = getc(file);
		*upt++ = ((ch&0x1f)<<6)|(ch2&0x3f);
	    } else if ( ch<0xf0 ) {
		ch2 = getc(file); ch3 = getc(file);
		*upt++ = ((ch&0xf)<<12)|((ch2&0x3f)<<6)|(ch3&0x3f);
	    } else {
		/* 4-byte sequence: stored as a surrogate pair */
		int ch4, w;
		ch2 = getc(file); ch3 = getc(file); ch4=getc(file);
		w = ( ((ch&7)<<2) | ((ch2&0x30)>>4) ) -1;
		*upt++ = 0xd800 | (w<<6) | ((ch2&0xf)<<2) | ((ch3&0x30)>>4);
		if ( upt<end )
		    *upt++ = 0xdc00 | ((ch3&0xf)<<6) | (ch4&0x3f);
	    }
	}
    } else if ( format!=0 ) {
	while ( upt<end ) {
	    ch = getc(file); ch2 = getc(file);
	    if ( ch2==EOF )
	break;
	    if ( format==1 )
		*upt ++ = (ch<<8)|ch2;
	    else
		*upt ++ = (ch2<<8)|ch;
	}
    } else {
	char buffer[400];
	while ( fgets(buffer,sizeof(buffer),file)!=NULL ) {
	    def2u_strncpy(upt,buffer,end-upt);
	    upt += u_strlen(upt);
	}
    }
    *upt = '\0';
    fclose(file);
return( space );
}
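/* BOM sniffing above: fe ff => big-endian UCS-2, ff fe => byte-swapped UCS-2,
 * ef bb bf => UTF-8; anything else falls back to 8-bit text in the default
 * encoding.  A hedged usage sketch (illustrative only, not called anywhere in
 * this file; the file name is hypothetical): */
#if 0
static void example_load(void) {
    /* 65536 matches the cap GTextFieldImport uses below */
    unichar_t *u = _GGadgetFileToUString("sample.txt",65536);
    if ( u!=NULL ) {
	/* ... use the NUL-terminated buffer ... */
	free(u);
    }
}
#endif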
static unichar_t txt[] = { '*','.','{','t','x','t',',','p','y','}', '\0' };
static unichar_t errort[] = { 'C','o','u','l','d',' ','n','o','t',' ','o','p','e','n', '\0' };
static unichar_t error[] = { 'C','o','u','l','d',' ','n','o','t',' ','o','p','e','n',' ','%','.','1','0','0','h','s', '\0' };

static void GTextFieldImport(GTextField *gt) {
    unichar_t *ret;
    char *cret;
    unichar_t *str;

    if ( _ggadget_use_gettext ) {
	char *temp = GWidgetOpenFile8(_("Open"),NULL,"*.{txt,py}",NULL,NULL);
	ret = utf82u_copy(temp);
	free(temp);
    } else {
	ret = GWidgetOpenFile(GStringGetResource(_STR_Open,NULL),NULL, txt,NULL,NULL);
    }
    if ( ret==NULL )
return;
    cret = u2def_copy(ret);
    free(ret);
    str = _GGadgetFileToUString(cret,65536);
    if ( str==NULL ) {
	if ( _ggadget_use_gettext )
	    GWidgetError8(_("Could not open file"), _("Could not open %.100s"),cret);
	else
	    GWidgetError(errort,error,cret);
	free(cret);
return;
    }
    free(cret);
    GTextField_Replace(gt,str);
    free(str);
}

static void GTextFieldSave(GTextField *gt,int utf8) {
    unichar_t *ret;
    char *cret;
    FILE *file;
    unichar_t *pt;

    if ( _ggadget_use_gettext ) {
	char *temp = GWidgetOpenFile8(_("Save"),NULL,"*.{txt,py}",NULL,NULL);
	ret = utf82u_copy(temp);
	free(temp);
    } else
	ret = GWidgetSaveAsFile(GStringGetResource(_STR_Save,NULL),NULL, txt,NULL,NULL);
    if ( ret==NULL )
return;
    cret = u2def_copy(ret);
    free(ret);
    file = fopen(cret,"w");
    if ( file==NULL ) {
	if ( _ggadget_use_gettext )
	    GWidgetError8(_("Could not open file"), _("Could not open %.100s"),cret);
	else
	    GWidgetError(errort,error,cret);
	free(cret);
return;
    }
    free(cret);

    if ( utf8 ) {
	putc(0xef,file);	/* Zero width something or other. Marks this as unicode, utf8 */
	putc(0xbb,file);
	putc(0xbf,file);
	for ( pt = gt->text ; *pt; ++pt ) {
	    if ( *pt<0x80 )
		putc(*pt,file);
	    else if ( *pt<0x800 ) {
		putc(0xc0 | (*pt>>6), file);
		putc(0x80 | (*pt&0x3f), file);
	    } else if ( *pt>=0xd800 && *pt<0xdc00 && pt[1]>=0xdc00 && pt[1]<0xe000 ) {
		/* surrogate pair: re-encode as a single 4-byte utf8 sequence */
		int u = ((*pt>>6)&0xf)+1, y = ((*pt&3)<<4) | ((pt[1]>>6)&0xf);
		putc( 0xf0 | (u>>2),file );
		putc( 0x80 | ((u&3)<<4) | ((*pt>>2)&0xf),file );
		putc( 0x80 | y,file );
		putc( 0x80 | (pt[1]&0x3f),file );
	    } else {
		putc( 0xe0 | (*pt>>12),file );
		putc( 0x80 | ((*pt>>6)&0x3f),file );
		putc( 0x80 | (*pt&0x3f),file );
	    }
	}
    } else {
	putc(0xfeff>>8,file);	/* Zero width something or other. Marks this as unicode */
	putc(0xfeff&0xff,file);
	for ( pt = gt->text ; *pt; ++pt ) {
	    putc(*pt>>8,file);
	    putc(*pt&0xff,file);
	}
    }
    fclose(file);
}

#define MID_Cut		1
#define MID_Copy	2
#define MID_Paste	3
#define MID_SelectAll	4
#define MID_Save	5
#define MID_SaveUCS2	6
#define MID_Import	7
#define MID_Undo	8

static GTextField *popup_kludge;

static void GTFPopupInvoked(GWindow v, GMenuItem *mi,GEvent *e) {
    GTextField *gt;

    if ( popup_kludge==NULL )
return;
    gt = popup_kludge;
    popup_kludge = NULL;
    switch ( mi->mid ) {
      case MID_Undo:
	gtextfield_editcmd(&gt->g,ec_undo);
      break;
      case MID_Cut:
	gtextfield_editcmd(&gt->g,ec_cut);
      break;
      case MID_Copy:
	gtextfield_editcmd(&gt->g,ec_copy);
      break;
      case MID_Paste:
	gtextfield_editcmd(&gt->g,ec_paste);
      break;
      case MID_SelectAll:
	gtextfield_editcmd(&gt->g,ec_selectall);
      break;
      case MID_Save:
	GTextFieldSave(gt,true);
      break;
      case MID_SaveUCS2:
	GTextFieldSave(gt,false);
      break;
      case MID_Import:
	GTextFieldImport(gt);
      break;
    }
    _ggadget_redraw(&gt->g);
}

static GMenuItem gtf_popuplist[] = {
    { { (unichar_t *) "_Undo", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, 'Z', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Undo },
    { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 1, 0, 0, 0, '\0' }, '\0', 0, NULL, NULL, NULL, 0 }, /* line */
    { { (unichar_t *) "Cu_t", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, 'X', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Cut },
    { { (unichar_t *) "_Copy", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, 'C', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Copy },
    { { (unichar_t *) "_Paste", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, 'V', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Paste },
    { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 1, 0, 0, 0, '\0' }, '\0', 0, NULL, NULL, NULL, 0 }, /* line */
    { { (unichar_t *) "_Save in UTF8", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1,
1, 0, '\0' }, 'S', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Save }, { { (unichar_t *) "Save in _UCS2", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, '\0', ksm_control, NULL, NULL, GTFPopupInvoked, MID_SaveUCS2 }, { { (unichar_t *) "_Import", NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 0, 0, 0, 0, 0, 1, 1, 0, '\0' }, 'I', ksm_control, NULL, NULL, GTFPopupInvoked, MID_Import }, GMENUITEM_EMPTY }; static int first = true; static void GTFPopupMenu(GTextField *gt, GEvent *event) { int no_sel = gt->sel_start==gt->sel_end; if ( first ) { gtf_popuplist[0].ti.text = (unichar_t *) _("_Undo"); gtf_popuplist[2].ti.text = (unichar_t *) _("Cu_t"); gtf_popuplist[3].ti.text = (unichar_t *) _("_Copy"); gtf_popuplist[4].ti.text = (unichar_t *) _("_Paste"); gtf_popuplist[6].ti.text = (unichar_t *) _("_Save in UTF8"); gtf_popuplist[7].ti.text = (unichar_t *) _("Save in _UCS2"); gtf_popuplist[8].ti.text = (unichar_t *) _("_Import"); first = false; } gtf_popuplist[0].ti.disabled = gt->oldtext==NULL; /* Undo */ gtf_popuplist[2].ti.disabled = no_sel; /* Cut */ gtf_popuplist[3].ti.disabled = no_sel; /* Copy */ gtf_popuplist[4].ti.disabled = !GDrawSelectionHasType(gt->g.base,sn_clipboard,"text/plain;charset=ISO-10646-UCS-2") && !GDrawSelectionHasType(gt->g.base,sn_clipboard,"UTF8_STRING") && !GDrawSelectionHasType(gt->g.base,sn_clipboard,"STRING"); popup_kludge = gt; GMenuCreatePopupMenu(gt->g.base,event, gtf_popuplist); } static void GTextFieldIncrement(GTextField *gt,int amount) { unichar_t *end; double d = u_strtod(gt->text,&end); char buf[40]; while ( *end==' ' ) ++end; if ( *end!='\0' ) { GDrawBeep(NULL); return; } d = floor(d)+amount; sprintf(buf,"%g", d); free(gt->oldtext); gt->oldtext = gt->text; gt->text = uc_copy(buf); free(gt->utf8_text); gt->utf8_text = copy(buf); _ggadget_redraw(&gt->g); GTextFieldChanged(gt,-1); } static int GTextFieldDoChange(GTextField *gt, GEvent *event) { int ss = gt->sel_start, se = gt->sel_end; int pos, l, xpos, sel; unichar_t *upt; if ( ( event->u.chr.state&(GMenuMask()&~ksm_shift)) || event->u.chr.chars[0]<' ' || event->u.chr.chars[0]==0x7f ) { switch ( event->u.chr.keysym ) { case GK_BackSpace: if ( gt->sel_start==gt->sel_end ) { if ( gt->sel_start==0 ) return( 2 ); --gt->sel_start; } GTextField_Replace(gt,nullstr); return( true ); break; case GK_Delete: if ( gt->sel_start==gt->sel_end ) { if ( gt->text[gt->sel_start]==0 ) return( 2 ); ++gt->sel_end; } GTextField_Replace(gt,nullstr); return( true ); break; case GK_Left: case GK_KP_Left: if ( gt->sel_start==gt->sel_end ) { gt->sel_start = GTBackPos(gt,gt->sel_start,event->u.chr.state&ksm_meta); if ( !(event->u.chr.state&ksm_shift )) gt->sel_end = gt->sel_start; } else if ( event->u.chr.state&ksm_shift ) { if ( gt->sel_end==gt->sel_base ) { gt->sel_start = GTBackPos(gt,gt->sel_start,event->u.chr.state&ksm_meta); } else { gt->sel_end = GTBackPos(gt,gt->sel_end,event->u.chr.state&ksm_meta); } } else { gt->sel_end = gt->sel_base = gt->sel_start; } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case GK_Right: case GK_KP_Right: if ( gt->sel_start==gt->sel_end ) { gt->sel_end = GTForePos(gt,gt->sel_start,event->u.chr.state&ksm_meta); if ( !(event->u.chr.state&ksm_shift )) gt->sel_start = gt->sel_end; } else if ( event->u.chr.state&ksm_shift ) { if ( gt->sel_end==gt->sel_base ) { gt->sel_start = GTForePos(gt,gt->sel_start,event->u.chr.state&ksm_meta); } else { gt->sel_end = GTForePos(gt,gt->sel_end,event->u.chr.state&ksm_meta); } } else { gt->sel_start = gt->sel_base = 
gt->sel_end; } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case GK_Up: case GK_KP_Up: if ( gt->numericfield ) { GTextFieldIncrement(gt,(event->u.chr.state&(ksm_shift|ksm_control))?10:1); return( 2 ); } if ( !gt->multi_line ) break; if ( !( event->u.chr.state&ksm_shift ) && gt->sel_start!=gt->sel_end ) gt->sel_end = gt->sel_base = gt->sel_start; else { pos = gt->sel_start; if ( ( event->u.chr.state&ksm_shift ) && gt->sel_start==gt->sel_base ) pos = gt->sel_end; l = GTextFieldFindLine(gt,gt->sel_start); GRect pos_rect; int ll = gt->lines8[l+1]==-1 ? -1 : gt->lines8[l+1]-gt->lines8[l]; sel = u2utf8_index(gt->sel_start-gt->lines[l],gt->utf8_text+gt->lines8[l]); GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[l],ll,NULL); GDrawLayoutIndexToPos(gt->g.base,sel,&pos_rect); xpos = pos_rect.x; if ( l!=0 ) { GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[l-1],gt->lines8[l]-gt->lines8[l-1],NULL); pos = GDrawLayoutXYToIndex(gt->g.base,xpos,0); pos = utf82u_index(pos+gt->lines8[l-1],gt->utf8_text); } if ( event->u.chr.state&ksm_shift ) { if ( pos<gt->sel_base ) { gt->sel_start = pos; gt->sel_end = gt->sel_base; } else { gt->sel_start = gt->sel_base; gt->sel_end = pos; } } else { gt->sel_start = gt->sel_end = gt->sel_base = pos; } } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case GK_Down: case GK_KP_Down: if ( gt->numericfield ) { GTextFieldIncrement(gt,(event->u.chr.state&(ksm_shift|ksm_control))?-10:-1); return( 2 ); } if ( !gt->multi_line ) break; if ( !( event->u.chr.state&ksm_shift ) && gt->sel_start!=gt->sel_end ) gt->sel_end = gt->sel_base = gt->sel_end; else { pos = gt->sel_start; if ( ( event->u.chr.state&ksm_shift ) && gt->sel_start==gt->sel_base ) pos = gt->sel_end; l = GTextFieldFindLine(gt,gt->sel_start); GRect pos_rect; int ll = gt->lines8[l+1]==-1 ? -1 : gt->lines8[l+1]-gt->lines8[l]; sel = u2utf8_index(gt->sel_start-gt->lines[l],gt->utf8_text+gt->lines8[l]); GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[l],ll,NULL); GDrawLayoutIndexToPos(gt->g.base,sel,&pos_rect); xpos = pos_rect.x; if ( l<gt->lcnt-1 ) { ll = gt->lines8[l+2]==-1 ? -1 : gt->lines8[l+2]-gt->lines8[l+1]; GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[l+1],ll,NULL); pos = GDrawLayoutXYToIndex(gt->g.base,xpos,0); pos = utf82u_index(pos+gt->lines8[l+1],gt->utf8_text); } if ( event->u.chr.state&ksm_shift ) { if ( pos<gt->sel_base ) { gt->sel_start = pos; gt->sel_end = gt->sel_base; } else { gt->sel_start = gt->sel_base; gt->sel_end = pos; } } else { gt->sel_start = gt->sel_end = gt->sel_base = pos; } } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case GK_Home: case GK_Begin: case GK_KP_Home: case GK_KP_Begin: if ( !(event->u.chr.state&ksm_shift) ) { gt->sel_start = gt->sel_base = gt->sel_end = 0; } else { gt->sel_start = 0; gt->sel_end = gt->sel_base; } GTextField_Show(gt,gt->sel_start); return( 2 ); break; /* Move to eol. 
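   (an emacs-style binding: the 'E'/'e' case below only applies when the control modifier is held)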
(if already at eol, move to next eol) */ case 'E': case 'e': if ( !( event->u.chr.state&ksm_control ) ) return( false ); upt = gt->text+gt->sel_base; if ( *upt=='\n' ) ++upt; upt = u_strchr(upt,'\n'); if ( upt==NULL ) upt=gt->text+u_strlen(gt->text); if ( !(event->u.chr.state&ksm_shift) ) { gt->sel_start = gt->sel_base = gt->sel_end =upt-gt->text; } else { gt->sel_start = gt->sel_base; gt->sel_end = upt-gt->text; } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case GK_End: case GK_KP_End: if ( !(event->u.chr.state&ksm_shift) ) { gt->sel_start = gt->sel_base = gt->sel_end = u_strlen(gt->text); } else { gt->sel_start = gt->sel_base; gt->sel_end = u_strlen(gt->text); } GTextField_Show(gt,gt->sel_start); return( 2 ); break; case 'D': case 'd': if ( event->u.chr.state&ksm_control ) { /* delete word */ gtextfield_editcmd(&gt->g,ec_deleteword); GTextField_Show(gt,gt->sel_start); return( true ); } break; case 'W': case 'w': if ( event->u.chr.state&ksm_control ) { /* backword */ gtextfield_editcmd(&gt->g,ec_backword); GTextField_Show(gt,gt->sel_start); return( true ); } break; case 'M': case 'm': case 'J': case 'j': if ( !( event->u.chr.state&ksm_control ) ) return( false ); /* fall through into return case */ case GK_Return: case GK_Linefeed: if ( gt->accepts_returns ) { GTextField_Replace(gt,newlinestr); return( true ); } break; case GK_Tab: if ( gt->completionfield && ((GCompletionField *) gt)->completion!=NULL ) { GTextFieldComplete(gt,true); gt->was_completing = true; return( 3 ); } if ( gt->accepts_tabs ) { GTextField_Replace(gt,tabstr); return( true ); } break; default: if ( GMenuIsCommand(event,H_("Select All|Ctl+A")) ) { gtextfield_editcmd(&gt->g,ec_selectall); return( 2 ); } else if ( GMenuIsCommand(event,H_("Copy|Ctl+C")) ) { gtextfield_editcmd(&gt->g,ec_copy); } else if ( GMenuIsCommand(event,H_("Paste|Ctl+V")) ) { gtextfield_editcmd(&gt->g,ec_paste); GTextField_Show(gt,gt->sel_start); return( true ); } else if ( GMenuIsCommand(event,H_("Cut|Ctl+X")) ) { gtextfield_editcmd(&gt->g,ec_cut); GTextField_Show(gt,gt->sel_start); return( true ); } else if ( GMenuIsCommand(event,H_("Undo|Ctl+Z")) ) { gtextfield_editcmd(&gt->g,ec_undo); GTextField_Show(gt,gt->sel_start); return( true ); } else if ( GMenuIsCommand(event,H_("Save|Ctl+S")) ) { GTextFieldSave(gt,true); return( 2 ); } else if ( GMenuIsCommand(event,H_("Import...|Ctl+Shft+I")) ) { GTextFieldImport(gt); return( true ); } else return( false ); break; } } else { GTextField_Replace(gt,event->u.chr.chars); return( 4 /* Do name completion */ ); } if ( gt->sel_start == gt->sel_end ) gt->sel_base = gt->sel_start; if ( ss!=gt->sel_start || se!=gt->sel_end ) GTextFieldGrabPrimarySelection(gt); return( false ); } static void _gt_cursor_pos(GTextField *gt, int sel_start, int *x, int *y) { int l, sel; *x = -1; *y= -1; GDrawSetFont(gt->g.base,gt->font); l = GTextFieldFindLine(gt,sel_start); if ( l<gt->loff_top || l>=gt->loff_top + ((gt->g.inner.height+gt->fh/2)/gt->fh)) return; *y = (l-gt->loff_top)*gt->fh; GRect pos_rect; int ll = gt->lines8[l+1]==-1 ? 
-1 : gt->lines8[l+1]-gt->lines8[l]; sel = u2utf8_index(sel_start-gt->lines[l],gt->utf8_text+gt->lines8[l]); GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[l],ll,NULL); GDrawLayoutIndexToPos(gt->g.base,sel,&pos_rect); *x = pos_rect.x - gt->xoff_left; } static void gt_cursor_pos(GTextField *gt, int *x, int *y) { _gt_cursor_pos(gt,gt->sel_start,x,y); } static void GTPositionGIC(GTextField *gt) { int x,y; if ( !gt->g.has_focus || gt->gic==NULL ) return; gt_cursor_pos(gt,&x,&y); if ( x<0 ) return; GDrawSetGIC(gt->g.base,gt->gic,gt->g.inner.x+x,gt->g.inner.y+y+gt->as); } static void gt_draw_cursor(GWindow pixmap, GTextField *gt) { GRect old; int x, y; if ( !gt->cursor_on || gt->sel_start != gt->sel_end ) return; gt_cursor_pos(gt,&x,&y); if ( x<0 || x>=gt->g.inner.width ) return; GDrawPushClip(pixmap,&gt->g.inner,&old); GDrawSetDifferenceMode(pixmap); GDrawSetFont(pixmap,gt->font); GDrawSetLineWidth(pixmap,0); GDrawDrawLine(pixmap,gt->g.inner.x+x,gt->g.inner.y+y, gt->g.inner.x+x,gt->g.inner.y+y+gt->fh, COLOR_WHITE); GDrawPopClip(pixmap,&old); } static void GTextFieldDrawDDCursor(GTextField *gt, int pos) { GRect old; int x, y, l; l = GTextFieldFindLine(gt,pos); if ( l<gt->loff_top || l>=gt->loff_top + (gt->g.inner.height/gt->fh)) return; _gt_cursor_pos(gt,pos,&x,&y); if ( x<0 || x>=gt->g.inner.width ) return; GDrawPushClip(gt->g.base,&gt->g.inner,&old); GDrawSetDifferenceMode(gt->g.base); GDrawSetFont(gt->g.base,gt->font); GDrawSetLineWidth(gt->g.base,0); GDrawSetDashedLine(gt->g.base,2,2,0); GDrawDrawLine(gt->g.base,gt->g.inner.x+x,gt->g.inner.y+y, gt->g.inner.x+x,gt->g.inner.y+y+gt->fh, COLOR_WHITE); GDrawPopClip(gt->g.base,&old); GDrawSetDashedLine(gt->g.base,0,0,0); gt->has_dd_cursor = !gt->has_dd_cursor; gt->dd_cursor_pos = pos; } static void GTextFieldDrawLineSel(GWindow pixmap, GTextField *gt, int line ) { GRect selr, sofar, nextch; int s,e, y,llen,i,j; /* Caller has checked to make sure selection applies to this line */ y = gt->g.inner.y+(line-gt->loff_top)*gt->fh; selr = gt->g.inner; selr.y = y; selr.height = gt->fh; if ( !gt->g.has_focus ) --selr.height; llen = gt->lines[line+1]==-1? u_strlen(gt->text+gt->lines[line])+gt->lines[line]: gt->lines[line+1]; s = gt->sel_start<gt->lines[line]?gt->lines[line]:gt->sel_start; e = gt->sel_end>gt->lines[line+1] && gt->lines[line+1]!=-1?gt->lines[line+1]-1: gt->sel_end; s = u2utf8_index(s-gt->lines[line],gt->utf8_text+gt->lines8[line]); e = u2utf8_index(e-gt->lines[line],gt->utf8_text+gt->lines8[line]); llen = gt->lines8[line+1]==-1? -1 : gt->lines8[line+1]-gt->lines8[line]; GDrawLayoutInit(pixmap,gt->utf8_text+gt->lines8[line],llen,NULL); for ( i=s; i<e; ) { GDrawLayoutIndexToPos(pixmap,i,&sofar); for ( j=i+1; j<e; ++j ) { GDrawLayoutIndexToPos(pixmap,j,&nextch); if ( nextch.x != sofar.x+sofar.width ) break; sofar.width += nextch.width; } if ( sofar.width<0 ) { selr.x = sofar.x+sofar.width + gt->g.inner.x - gt->xoff_left; selr.width = -sofar.width; } else { selr.x = sofar.x + gt->g.inner.x - gt->xoff_left; selr.width = sofar.width; } GDrawFillRect(pixmap,&selr,gt->g.box->active_border); i = j; } } static void GTextFieldDrawLine(GWindow pixmap, GTextField *gt, int line, Color fg ) { int y = gt->g.inner.y+(line-gt->loff_top)*gt->fh; int ll = gt->lines[line+1]==-1 ? -1 : gt->lines[line+1]-gt->lines[line]; ll = gt->lines8[line+1]==-1? 
-1 : gt->lines8[line+1]-gt->lines8[line]; GDrawLayoutInit(pixmap,gt->utf8_text+gt->lines8[line],ll,NULL); GDrawLayoutDraw(pixmap,gt->g.inner.x-gt->xoff_left,y+gt->as,fg); } static int gtextfield_expose(GWindow pixmap, GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; GListField *ge = (GListField *) g; GRect old1, old2, *r = &g->r; Color fg; int ll,i, last; GRect unpadded_inner; int pad; if ( g->state == gs_invisible || gt->dontdraw ) return( false ); if ( gt->listfield || gt->numericfield ) r = &ge->fieldrect; GDrawPushClip(pixmap,r,&old1); GBoxDrawBackground(pixmap,r,g->box, g->state==gs_enabled? gs_pressedactive: g->state,false); GBoxDrawBorder(pixmap,r,g->box,g->state,false); unpadded_inner = g->inner; pad = GDrawPointsToPixels(g->base,g->box->padding); unpadded_inner.x -= pad; unpadded_inner.y -= pad; unpadded_inner.width += 2*pad; unpadded_inner.height += 2*pad; GDrawPushClip(pixmap,&unpadded_inner,&old2); GDrawSetFont(pixmap,gt->font); fg = g->state==gs_disabled?g->box->disabled_foreground: g->box->main_foreground==COLOR_DEFAULT?GDrawGetDefaultForeground(GDrawGetDisplayOfWindow(pixmap)): g->box->main_foreground; ll = 0; if ( (last = gt->g.inner.height/gt->fh)==0 ) last = 1; if ( gt->sel_start != gt->sel_end ) { /* I used to have support for drawing on a bw display where the */ /* selection and the foreground color were the same (black) and */ /* selected text was white. No longer. */ /* Draw the entire selection first, then the text itself */ for ( i=gt->loff_top; i<gt->loff_top+last && gt->lines[i]!=-1; ++i ) { if ( gt->sel_end>gt->lines[i] && (gt->lines[i+1]==-1 || gt->sel_start<gt->lines[i+1])) GTextFieldDrawLineSel(pixmap,gt,i); } } for ( i=gt->loff_top; i<gt->loff_top+last && gt->lines[i]!=-1; ++i ) GTextFieldDrawLine(pixmap,gt,i,fg); GDrawPopClip(pixmap,&old2); GDrawPopClip(pixmap,&old1); gt_draw_cursor(pixmap, gt); if ( gt->listfield ) { int marklen = GDrawPointsToPixels(pixmap,_GListMarkSize); GDrawPushClip(pixmap,&ge->buttonrect,&old1); GBoxDrawBackground(pixmap,&ge->buttonrect,&glistfieldmenu_box, g->state==gs_enabled? gs_pressedactive: g->state,false); GBoxDrawBorder(pixmap,&ge->buttonrect,&glistfieldmenu_box,g->state,false); GListMarkDraw(pixmap, ge->buttonrect.x + (ge->buttonrect.width - marklen)/2, g->inner.y, g->inner.height, g->state); GDrawPopClip(pixmap,&old1); } else if ( gt->numericfield ) { int y, w; int half; GPoint pts[5]; int bp = GBoxBorderWidth(gt->g.base,&gnumericfieldspinner_box); Color fg = g->state==gs_disabled?gnumericfieldspinner_box.disabled_foreground: gnumericfieldspinner_box.main_foreground==COLOR_DEFAULT?GDrawGetDefaultForeground(GDrawGetDisplayOfWindow(pixmap)): gnumericfieldspinner_box.main_foreground; GBoxDrawBackground(pixmap,&ge->buttonrect,&gnumericfieldspinner_box, g->state==gs_enabled? 
gs_pressedactive: g->state,false); GBoxDrawBorder(pixmap,&ge->buttonrect,&gnumericfieldspinner_box,g->state,false); /* GDrawDrawRect(pixmap,&ge->buttonrect,fg); */ y = ge->buttonrect.y + ge->buttonrect.height/2; w = ge->buttonrect.width; w &= ~1; pts[0].x = ge->buttonrect.x+3+bp; pts[1].x = ge->buttonrect.x+w-3-bp; pts[2].x = ge->buttonrect.x + w/2; half = pts[2].x-pts[0].x; GDrawDrawLine(pixmap, pts[0].x-3,y, pts[1].x+3,y, fg ); pts[0].y = pts[1].y = y-2; pts[2].y = pts[1].y-half; pts[3] = pts[0]; GDrawFillPoly(pixmap,pts,3,fg); pts[0].y = pts[1].y = y+2; pts[2].y = pts[1].y+half; pts[3] = pts[0]; GDrawFillPoly(pixmap,pts,3,fg); } return( true ); } static int glistfield_mouse(GListField *ge, GEvent *event) { if ( event->type!=et_mousedown ) return( true ); if ( ge->popup != NULL ) { GDrawDestroyWindow(ge->popup); ge->popup = NULL; return( true ); } ge->popup = GListPopupCreate(&ge->gt.g,GListFieldSelected,ge->ti); return( true ); } static int gnumericfield_mouse(GTextField *gt, GEvent *event) { GListField *ge = (GListField *) gt; if ( event->type==et_mousedown ) { gt->incr_down = event->u.mouse.y > (ge->buttonrect.y + ge->buttonrect.height/2); GTextFieldIncrement(gt,gt->incr_down?-1:1); if ( gt->numeric_scroll==NULL ) gt->numeric_scroll = GDrawRequestTimer(gt->g.base,200,100,NULL); } else if ( gt->numeric_scroll!=NULL ) { GDrawCancelTimer(gt->numeric_scroll); gt->numeric_scroll = NULL; } return( true ); } static int GTextFieldDoDrop(GTextField *gt,GEvent *event,int endpos) { if ( gt->has_dd_cursor ) GTextFieldDrawDDCursor(gt,gt->dd_cursor_pos); if ( event->type == et_mousemove ) { if ( GGadgetInnerWithin(&gt->g,event->u.mouse.x,event->u.mouse.y) ) { if ( endpos<gt->sel_start || endpos>=gt->sel_end ) GTextFieldDrawDDCursor(gt,endpos); } else if ( !GGadgetWithin(&gt->g,event->u.mouse.x,event->u.mouse.y) ) { GDrawPostDragEvent(gt->g.base,event,et_drag); } } else { if ( GGadgetInnerWithin(&gt->g,event->u.mouse.x,event->u.mouse.y) ) { if ( endpos>=gt->sel_start && endpos<gt->sel_end ) { gt->sel_start = gt->sel_end = endpos; } else { unichar_t *old=gt->oldtext, *temp; int pos=0; if ( event->u.mouse.state&ksm_control ) { temp = malloc((u_strlen(gt->text)+gt->sel_end-gt->sel_start+1)*sizeof(unichar_t)); memcpy(temp,gt->text,endpos*sizeof(unichar_t)); memcpy(temp+endpos,gt->text+gt->sel_start, (gt->sel_end-gt->sel_start)*sizeof(unichar_t)); u_strcpy(temp+endpos+gt->sel_end-gt->sel_start,gt->text+endpos); } else if ( endpos>=gt->sel_end ) { temp = u_copy(gt->text); memcpy(temp+gt->sel_start,temp+gt->sel_end, (endpos-gt->sel_end)*sizeof(unichar_t)); memcpy(temp+endpos-(gt->sel_end-gt->sel_start), gt->text+gt->sel_start,(gt->sel_end-gt->sel_start)*sizeof(unichar_t)); pos = endpos; } else /*if ( endpos<gt->sel_start )*/ { temp = u_copy(gt->text); memcpy(temp+endpos,gt->text+gt->sel_start, (gt->sel_end-gt->sel_start)*sizeof(unichar_t)); memcpy(temp+endpos+gt->sel_end-gt->sel_start,gt->text+endpos, (gt->sel_start-endpos)*sizeof(unichar_t)); pos = endpos+gt->sel_end-gt->sel_start; } gt->oldtext = gt->text; gt->sel_oldstart = gt->sel_start; gt->sel_oldend = gt->sel_end; gt->sel_oldbase = gt->sel_base; gt->sel_start = gt->sel_end = pos; gt->text = temp; free(old); GTextFieldRefigureLines(gt, endpos<gt->sel_oldstart?endpos:gt->sel_oldstart); } } else if ( !GGadgetWithin(&gt->g,event->u.mouse.x,event->u.mouse.y) ) { /* Don't delete the selection until someone actually accepts the drop */ /* Don't delete at all (copy not move) if control key is down */ if ( ( event->u.mouse.state&ksm_control ) ) 
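		/* control held: this is a copy, so grab a plain selection; otherwise grab the drag-and-drop selection, whose generators (ddgen*data above) delete the source text once the drop is accepted */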
GTextFieldGrabSelection(gt,sn_drag_and_drop); else GTextFieldGrabDDSelection(gt); GDrawPostDragEvent(gt->g.base,event,et_drop); } gt->drag_and_drop = false; GDrawSetCursor(gt->g.base,gt->old_cursor); _ggadget_redraw(&gt->g); } return( false ); } static int gtextfield_mouse(GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; GListField *ge = (GListField *) g; unichar_t *end=NULL, *end1, *end2; int i=0,ll,curlen; if ( gt->hidden_cursor ) { GDrawSetCursor(gt->g.base,gt->old_cursor); gt->hidden_cursor = false; _GWidget_ClearGrabGadget(g); } if ( !g->takes_input || (g->state!=gs_enabled && g->state!=gs_active && g->state!=gs_focused )) return( false ); if ( event->type == et_crossing ) return( false ); if ( gt->completionfield && ((GCompletionField *) gt)->choice_popup!=NULL && event->type==et_mousedown ) GCompletionDestroy((GCompletionField *) gt); if (( gt->listfield && event->u.mouse.x>=ge->buttonrect.x && event->u.mouse.x<ge->buttonrect.x+ge->buttonrect.width && event->u.mouse.y>=ge->buttonrect.y && event->u.mouse.y<ge->buttonrect.y+ge->buttonrect.height ) || ( gt->listfield && ge->popup!=NULL )) return( glistfield_mouse(ge,event)); if ( gt->numericfield && event->u.mouse.x>=ge->buttonrect.x && event->u.mouse.x<ge->buttonrect.x+ge->buttonrect.width && event->u.mouse.y>=ge->buttonrect.y && event->u.mouse.y<ge->buttonrect.y+ge->buttonrect.height ) return( gnumericfield_mouse(gt,event)); if (( event->type==et_mouseup || event->type==et_mousedown ) && (event->u.mouse.button>=4 && event->u.mouse.button<=7)) { int isv = event->u.mouse.button<=5; if ( event->u.mouse.state&ksm_shift ) isv = !isv; if ( isv && gt->vsb!=NULL ) return( GGadgetDispatchEvent(&gt->vsb->g,event)); else if ( !isv && gt->hsb!=NULL ) return( GGadgetDispatchEvent(&gt->hsb->g,event)); else return( true ); } if ( gt->pressed==NULL && event->type == et_mousemove && g->popup_msg!=NULL && GGadgetWithin(g,event->u.mouse.x,event->u.mouse.y)) GGadgetPreparePopup(g->base,g->popup_msg); curlen = u_strlen(gt->text); if ( event->type == et_mousedown || gt->pressed ) { i = (event->u.mouse.y-g->inner.y)/gt->fh + gt->loff_top; if ( i<0 ) i = 0; if ( !gt->multi_line ) i = 0; if ( i>=gt->lcnt ) { end = gt->text+curlen; i = gt->lcnt - 1; if (i < 0) i = 0; // Can lcnt ever be 0? 
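	    /* lcnt looks like it is always >=1 once the lines arrays exist (GTextFieldRefigureLines records at least one line), so this clamp appears purely defensive */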
	} else
	    end = GTextFieldGetPtFromPos(gt,i,event->u.mouse.x);
    }

    if ( event->type == et_mousedown ) {
	int end8;
	if ( event->u.mouse.button==3 &&
		GGadgetWithin(g,event->u.mouse.x,event->u.mouse.y)) {
	    GTFPopupMenu(gt,event);
return( true );
	}

	ll = gt->lines8[i+1]==-1?-1:gt->lines8[i+1]-gt->lines8[i]-1;
	GDrawLayoutInit(gt->g.base,gt->utf8_text+gt->lines8[i],ll,NULL);
	end8 = GDrawLayoutXYToIndex(gt->g.base,event->u.mouse.x-g->inner.x+gt->xoff_left,0);
	end1 = end2 = gt->text + gt->lines[i] + utf82u_index(end8,gt->utf8_text+gt->lines8[i]);

	gt->wordsel = gt->linesel = false;
	if ( event->u.mouse.button==1 && event->u.mouse.clicks>=3 ) {
	    /* triple (or more) click: select the whole line */
	    gt->sel_start = gt->lines[i]; gt->sel_end = gt->lines[i+1];
	    if ( gt->sel_end==-1 )
		gt->sel_end = curlen;
	    gt->wordsel = false; gt->linesel = true;
	} else if ( event->u.mouse.button==1 && event->u.mouse.clicks==2 ) {
	    /* double click: word selection */
	    gt->sel_start = gt->sel_end = gt->sel_base = end-gt->text;
	    gt->wordsel = true;
	    GTextFieldSelectWords(gt,gt->sel_base);
	} else if ( end1-gt->text>=gt->sel_start && end2-gt->text<gt->sel_end &&
		gt->sel_start!=gt->sel_end &&
		event->u.mouse.button==1 ) {
	    /* click inside an existing selection begins a drag and drop */
	    gt->drag_and_drop = true;
	    if ( !gt->hidden_cursor )
		gt->old_cursor = GDrawGetCursor(gt->g.base);
	    GDrawSetCursor(gt->g.base,ct_draganddrop);
	} else if ( /*event->u.mouse.button!=3 &&*/ !(event->u.mouse.state&ksm_shift) ) {
	    if ( event->u.mouse.button==1 )
		GTextFieldGrabPrimarySelection(gt);
	    gt->sel_start = gt->sel_end = gt->sel_base = end-gt->text;
	} else if ( end-gt->text>gt->sel_base ) {
	    gt->sel_start = gt->sel_base; gt->sel_end = end-gt->text;
	} else {
	    gt->sel_start = gt->sel_base = gt->sel_end = end-gt->text;
	}
	if ( gt->pressed==NULL )
	    gt->pressed = GDrawRequestTimer(gt->g.base,200,100,NULL);
	if ( gt->sel_start > curlen )
	    /* Ok to have selection at end, but beyond is an error */
	    fprintf( stderr, "About to crash\n" );
	_ggadget_redraw(g);
return( true );
    } else if ( gt->pressed && (event->type == et_mousemove || event->type == et_mouseup )) {
	int refresh = true;

	if ( gt->drag_and_drop ) {
	    refresh = GTextFieldDoDrop(gt,event,end-gt->text);
	    // curlen may be inaccurate now, but we recalculate after this guard set.
	} else if ( gt->linesel ) {
	    int j;
	    for ( j=i; j>0 && gt->text[gt->lines[j]-1] != '\n'; --j )
		;
	    gt->sel_start = gt->lines[j];
	    for ( j=i+1; gt->lines[j]!=-1 && gt->text[gt->lines[j]-1] != '\n'; ++j )
		;
	    gt->sel_end = gt->lines[j]!=-1 ?
gt->lines[j]-1 : curlen; } else if ( gt->wordsel ) GTextFieldSelectWords(gt,end-gt->text); else if ( event->u.mouse.button!=2 ) { int e = end-gt->text; if ( e>gt->sel_base ) { gt->sel_start = gt->sel_base; gt->sel_end = e; } else { gt->sel_start = e; gt->sel_end = gt->sel_base; } } if ( event->type==et_mouseup ) { GDrawCancelTimer(gt->pressed); gt->pressed = NULL; if ( event->u.mouse.button==2 ) GTextFieldPaste(gt,sn_primary); if ( gt->sel_start==gt->sel_end ) GTextField_Show(gt,gt->sel_start); GTextFieldChanged(gt,-1); if ( gt->sel_start<gt->sel_end && _GDraw_InsCharHook!=NULL && !gt->donthook ) (_GDraw_InsCharHook)(GDrawGetDisplayOfWindow(gt->g.base), gt->text[gt->sel_start]); } if ( gt->sel_end > u_strlen(gt->text) ) fprintf( stderr, "About to crash\n" ); if ( refresh ) _ggadget_redraw(g); return( true ); } return( false ); } static int gtextfield_key(GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; if ( !g->takes_input || (g->state!=gs_enabled && g->state!=gs_active && g->state!=gs_focused )) return( false ); if ( gt->listfield && ((GListField *) gt)->popup!=NULL ) { GWindow popup = ((GListField *) gt)->popup; (GDrawGetEH(popup))(popup,event); return( true ); } if ( gt->completionfield && ((GCompletionField *) gt)->choice_popup!=NULL && GCompletionHandleKey(gt,event)) return( true ); if ( event->type == et_charup ) return( false ); if ( event->u.chr.keysym == GK_F1 || event->u.chr.keysym == GK_Help || (event->u.chr.keysym == GK_Return && !gt->accepts_returns ) || ( event->u.chr.keysym == GK_Tab && !gt->accepts_tabs ) || event->u.chr.keysym == GK_BackTab || event->u.chr.keysym == GK_Escape ) return( false ); if ( !gt->hidden_cursor ) { /* hide the mouse pointer */ if ( !gt->drag_and_drop ) gt->old_cursor = GDrawGetCursor(gt->g.base); GDrawSetCursor(g->base,ct_invisible); gt->hidden_cursor = true; _GWidget_SetGrabGadget(g); /* so that we get the next mouse movement to turn the cursor on */ } if( gt->cursor_on ) { /* undraw the blinky text cursor if it is drawn */ gt_draw_cursor(g->base, gt); gt->cursor_on = false; } switch ( GTextFieldDoChange(gt,event)) { case 4: /* We should try name completion */ if ( gt->completionfield && ((GCompletionField *) gt)->completion!=NULL && gt->was_completing && gt->sel_start == u_strlen(gt->text)) GTextFieldComplete(gt,false); else GTextFieldChanged(gt,-1); break; case 3: /* They typed a Tab */ break; case 2: break; case true: GTextFieldChanged(gt,-1); break; case false: return( false ); } _ggadget_redraw(g); return( true ); } static int gtextfield_focus(GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; if ( g->state == gs_invisible || g->state == gs_disabled ) return( false ); if ( gt->cursor!=NULL ) { GDrawCancelTimer(gt->cursor); gt->cursor = NULL; gt->cursor_on = false; } if ( gt->hidden_cursor && !event->u.focus.gained_focus ) { GDrawSetCursor(gt->g.base,gt->old_cursor); gt->hidden_cursor = false; } gt->g.has_focus = event->u.focus.gained_focus; if ( event->u.focus.gained_focus ) { gt->cursor = GDrawRequestTimer(gt->g.base,400,400,NULL); gt->cursor_on = true; if ( event->u.focus.mnemonic_focus != mf_normal ) GTextFieldSelect(&gt->g,0,-1); if ( gt->gic!=NULL ) GTPositionGIC(gt); else if ( GWidgetGetInputContext(gt->g.base)!=NULL ) GDrawSetGIC(gt->g.base,GWidgetGetInputContext(gt->g.base),10000,10000); } _ggadget_redraw(g); GTextFieldFocusChanged(gt,event->u.focus.gained_focus); return( true ); } static int gtextfield_timer(GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; if ( !g->takes_input || 
(g->state!=gs_enabled && g->state!=gs_active && g->state!=gs_focused )) return(false); if ( gt->cursor == event->u.timer.timer ) { if ( gt->cursor_on ) { gt_draw_cursor(g->base, gt); gt->cursor_on = false; } else { gt->cursor_on = true; gt_draw_cursor(g->base, gt); } return( true ); } if ( gt->numeric_scroll == event->u.timer.timer ) { GTextFieldIncrement(gt,gt->incr_down?-1:1); return( true ); } if ( gt->pressed == event->u.timer.timer ) { GEvent e; GDrawSetFont(g->base,gt->font); GDrawGetPointerPosition(g->base,&e); if ( (e.u.mouse.x<g->r.x && gt->xoff_left>0 ) || (gt->multi_line && e.u.mouse.y<g->r.y && gt->loff_top>0 ) || ( e.u.mouse.x >= g->r.x + g->r.width && gt->xmax-gt->xoff_left>g->inner.width ) || ( e.u.mouse.y >= g->r.y + g->r.height && gt->lcnt-gt->loff_top > g->inner.height/gt->fh )) { int l = gt->loff_top + (e.u.mouse.y-g->inner.y)/gt->fh; int xpos; unichar_t *end; if ( e.u.mouse.y<g->r.y && gt->loff_top>0 ) l = --gt->loff_top; else if ( e.u.mouse.y >= g->r.y + g->r.height && gt->lcnt-gt->loff_top > g->inner.height/gt->fh ) { ++gt->loff_top; l = gt->loff_top + g->inner.width/gt->fh; } else if ( l<gt->loff_top ) l = gt->loff_top; else if ( l>=gt->loff_top + g->inner.height/gt->fh ) l = gt->loff_top + g->inner.height/gt->fh-1; if ( l>=gt->lcnt ) l = gt->lcnt-1; xpos = e.u.mouse.x+gt->xoff_left; if ( e.u.mouse.x<g->r.x && gt->xoff_left>0 ) { gt->xoff_left -= gt->nw; xpos = g->inner.x + gt->xoff_left; } else if ( e.u.mouse.x >= g->r.x + g->r.width && gt->xmax-gt->xoff_left>g->inner.width ) { gt->xoff_left += gt->nw; xpos = g->inner.x + gt->xoff_left + g->inner.width; } end = GTextFieldGetPtFromPos(gt,l,xpos); if ( end-gt->text > gt->sel_base ) { gt->sel_start = gt->sel_base; gt->sel_end = end-gt->text; } else { gt->sel_start = end-gt->text; gt->sel_end = gt->sel_base; } _ggadget_redraw(g); if ( gt->vsb!=NULL ) GScrollBarSetPos(&gt->vsb->g,gt->loff_top); if ( gt->hsb!=NULL ) GScrollBarSetPos(&gt->hsb->g,gt->xoff_left); } return( true ); } return( false ); } static int gtextfield_sel(GGadget *g, GEvent *event) { GTextField *gt = (GTextField *) g; unichar_t *end; int i; if ( event->type == et_selclear ) { if ( event->u.selclear.sel==sn_primary && gt->sel_start!=gt->sel_end ) { gt->sel_start = gt->sel_end = gt->sel_base; _ggadget_redraw(g); return( true ); } return( false ); } if ( gt->has_dd_cursor ) GTextFieldDrawDDCursor(gt,gt->dd_cursor_pos); GDrawSetFont(g->base,gt->font); i = (event->u.drag_drop.y-g->inner.y)/gt->fh + gt->loff_top; if ( !gt->multi_line ) i = 0; if ( i>=gt->lcnt ) end = gt->text+u_strlen(gt->text); else end = GTextFieldGetPtFromPos(gt,i,event->u.drag_drop.x); if ( event->type == et_drag ) { GTextFieldDrawDDCursor(gt,end-gt->text); } else if ( event->type == et_dragout ) { /* this event exists simply to clear the dd cursor line. 
We've done */ /* that already */ } else if ( event->type == et_drop ) { gt->sel_start = gt->sel_end = gt->sel_base = end-gt->text; GTextFieldPaste(gt,sn_drag_and_drop); GTextField_Show(gt,gt->sel_start); GTextFieldChanged(gt,-1); _ggadget_redraw(&gt->g); } else return( false ); return( true ); } static void gtextfield_destroy(GGadget *g) { GTextField *gt = (GTextField *) g; if ( gt==NULL ) return; if ( gt->listfield ) { GListField *glf = (GListField *) g; if ( glf->popup ) { /* Must cleanup the popup before we die */ /* We do this instead of GDrawDestroyWindow because this method is synchronous */ GEvent die; die.type = et_close; die.w = glf->popup; GDrawPostEvent(&die); } GTextInfoArrayFree(glf->ti); } if ( gt->completionfield ) GCompletionDestroy((GCompletionField *) g); if ( gt->vsb!=NULL ) (gt->vsb->g.funcs->destroy)(&gt->vsb->g); if ( gt->hsb!=NULL ) (gt->hsb->g.funcs->destroy)(&gt->hsb->g); GDrawCancelTimer(gt->numeric_scroll); GDrawCancelTimer(gt->pressed); GDrawCancelTimer(gt->cursor); free(gt->lines); free(gt->oldtext); free(gt->text); free(gt->utf8_text); free(gt->lines8); _ggadget_destroy(g); } static void GTextFieldSetTitle(GGadget *g,const unichar_t *tit) { GTextField *gt = (GTextField *) g; unichar_t *old = gt->oldtext; if ( tit==NULL || u_strcmp(tit,gt->text)==0 ) /* If it doesn't change anything, then don't trash undoes or selection */ return; gt->oldtext = gt->text; gt->sel_oldstart = gt->sel_start; gt->sel_oldend = gt->sel_end; gt->sel_oldbase = gt->sel_base; gt->text = u_copy(tit); /* tit might be oldtext, so must copy before freeing */ free(old); free(gt->utf8_text); gt->utf8_text = u2utf8_copy(gt->text); gt->sel_start = gt->sel_end = gt->sel_base = u_strlen(tit); GTextFieldRefigureLines(gt,0); GTextField_Show(gt,gt->sel_start); _ggadget_redraw(g); } static const unichar_t *_GTextFieldGetTitle(GGadget *g) { GTextField *gt = (GTextField *) g; return( gt->text ); } static void GTextFieldSetFont(GGadget *g,FontInstance *new) { GTextField *gt = (GTextField *) g; gt->font = new; GTextFieldRefigureLines(gt,0); } static FontInstance *GTextFieldGetFont(GGadget *g) { GTextField *gt = (GTextField *) g; return( gt->font ); } void GTextFieldShow(GGadget *g,int pos) { GTextField *gt = (GTextField *) g; GTextField_Show(gt,pos); _ggadget_redraw(g); } void GTextFieldSelect(GGadget *g,int start, int end) { GTextField *gt = (GTextField *) g; GTextFieldGrabPrimarySelection(gt); if ( end<0 ) { end = u_strlen(gt->text); if ( start<0 ) start = end; } if ( start>end ) { int temp = start; start = end; end = temp; } if ( end>u_strlen(gt->text)) end = u_strlen(gt->text); if ( start>u_strlen(gt->text)) start = end; else if ( start<0 ) start=0; gt->sel_start = gt->sel_base = start; gt->sel_end = end; _ggadget_redraw(g); /* Should be safe just to draw the textfield gadget, sbs won't have changed */ } void GTextFieldReplace(GGadget *g,const unichar_t *txt) { GTextField *gt = (GTextField *) g; GTextField_Replace(gt,txt); _ggadget_redraw(g); } static void GListFSelectOne(GGadget *g, int32 pos) { GListField *gl = (GListField *) g; int i; for ( i=0; i<gl->ltot; ++i ) gl->ti[i]->selected = false; if ( pos>=gl->ltot ) pos = gl->ltot-1; if ( pos<0 ) pos = 0; if ( gl->ltot>0 ) { gl->ti[pos]->selected = true; GTextFieldSetTitle(g,gl->ti[pos]->text); } } static int32 GListFIsSelected(GGadget *g, int32 pos) { GListField *gl = (GListField *) g; if ( pos>=gl->ltot ) return( false ); if ( pos<0 ) return( false ); if ( gl->ltot>0 ) return( gl->ti[pos]->selected ); return( false ); } static int32 
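/* Return the index of the first selected item in the list, or -1 if no */
/*  item is selected. */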
GListFGetFirst(GGadget *g) { int i; GListField *gl = (GListField *) g; for ( i=0; i<gl->ltot; ++i ) if ( gl->ti[i]->selected ) return( i ); return( -1 ); } static GTextInfo **GListFGet(GGadget *g,int32 *len) { GListField *gl = (GListField *) g; if ( len!=NULL ) *len = gl->ltot; return( gl->ti ); } static GTextInfo *GListFGetItem(GGadget *g,int32 pos) { GListField *gl = (GListField *) g; if ( pos<0 || pos>=gl->ltot ) return( NULL ); return(gl->ti[pos]); } static void GListFSet(GGadget *g,GTextInfo **ti,int32 docopy) { GListField *gl = (GListField *) g; GTextInfoArrayFree(gl->ti); if ( docopy || ti==NULL ) ti = GTextInfoArrayCopy(ti); gl->ti = ti; gl->ltot = GTextInfoArrayCount(ti); } static void GListFClear(GGadget *g) { GListFSet(g,NULL,true); } static void gtextfield_redraw(GGadget *g) { GTextField *gt = (GTextField *) g; if ( gt->vsb!=NULL ) _ggadget_redraw((GGadget *) (gt->vsb)); if ( gt->hsb!=NULL ) _ggadget_redraw((GGadget *) (gt->hsb)); _ggadget_redraw(g); } static void gtextfield_move(GGadget *g, int32 x, int32 y ) { GTextField *gt = (GTextField *) g; int fxo=0, fyo=0, bxo, byo; if ( gt->listfield || gt->numericfield ) { fxo = ((GListField *) gt)->fieldrect.x - g->r.x; fyo = ((GListField *) gt)->fieldrect.y - g->r.y; bxo = ((GListField *) gt)->buttonrect.x - g->r.x; byo = ((GListField *) gt)->buttonrect.y - g->r.y; } if ( gt->vsb!=NULL ) _ggadget_move((GGadget *) (gt->vsb),x+(gt->vsb->g.r.x-g->r.x),y); if ( gt->hsb!=NULL ) _ggadget_move((GGadget *) (gt->hsb),x,y+(gt->hsb->g.r.y-g->r.y)); _ggadget_move(g,x,y); if ( gt->listfield || gt->numericfield ) { ((GListField *) gt)->fieldrect.x = g->r.x + fxo; ((GListField *) gt)->fieldrect.y = g->r.y + fyo; ((GListField *) gt)->buttonrect.x = g->r.x + bxo; ((GListField *) gt)->buttonrect.y = g->r.y + byo; } } static void gtextfield_resize(GGadget *g, int32 width, int32 height ) { GTextField *gt = (GTextField *) g; int gtwidth=width, gtheight=height, oldheight=0; int fxo=0, fwo=0, fyo=0, bxo, byo; int l; if ( gt->listfield || gt->numericfield ) { fxo = ((GListField *) gt)->fieldrect.x - g->r.x; fwo = g->r.width - ((GListField *) gt)->fieldrect.width; fyo = ((GListField *) gt)->fieldrect.y - g->r.y; bxo = g->r.x+g->r.width - ((GListField *) gt)->buttonrect.x; byo = ((GListField *) gt)->buttonrect.y - g->r.y; } if ( gt->hsb!=NULL ) { oldheight = gt->hsb->g.r.y+gt->hsb->g.r.height-g->r.y; gtheight = height - (oldheight-g->r.height); } if ( gt->vsb!=NULL ) { int oldwidth = gt->vsb->g.r.x+gt->vsb->g.r.width-g->r.x; gtwidth = width - (oldwidth-g->r.width); _ggadget_move((GGadget *) (gt->vsb),gt->vsb->g.r.x+width-oldwidth,gt->vsb->g.r.y); _ggadget_resize((GGadget *) (gt->vsb),gt->vsb->g.r.width,gtheight); } if ( gt->hsb!=NULL ) { _ggadget_move((GGadget *) (gt->hsb),gt->hsb->g.r.x,gt->hsb->g.r.y+height-oldheight); _ggadget_resize((GGadget *) (gt->hsb),gtwidth,gt->hsb->g.r.height); } _ggadget_resize(g,gtwidth, gtheight); if ( gt->hsb==NULL && gt->xoff_left!=0 && !gt->multi_line && GDrawGetTextWidth(gt->g.base,gt->text,-1)<gt->g.inner.width ) gt->xoff_left = 0; GTextFieldRefigureLines(gt,0); if ( gt->vsb!=NULL ) { GScrollBarSetBounds(&gt->vsb->g,0,gt->lcnt, gt->g.inner.height<gt->fh ? 
1 : gt->g.inner.height/gt->fh); l = gt->loff_top; if ( gt->loff_top>gt->lcnt-gt->g.inner.height/gt->fh ) l = gt->lcnt-gt->g.inner.height/gt->fh; if ( l<0 ) l = 0; if ( l!=gt->loff_top ) { gt->loff_top = l; GScrollBarSetPos(&gt->vsb->g,l); _ggadget_redraw(&gt->g); } } if ( gt->listfield || gt->numericfield) { ((GListField *) gt)->fieldrect.x = g->r.x + fxo; ((GListField *) gt)->fieldrect.width = g->r.width -fwo; ((GListField *) gt)->fieldrect.y = g->r.y + fyo; ((GListField *) gt)->buttonrect.x = g->r.x+g->r.width - bxo; ((GListField *) gt)->buttonrect.y = g->r.y + byo; } } static GRect *gtextfield_getsize(GGadget *g, GRect *r ) { GTextField *gt = (GTextField *) g; _ggadget_getsize(g,r); if ( gt->vsb!=NULL ) r->width = gt->vsb->g.r.x+gt->vsb->g.r.width-g->r.x; if ( gt->hsb!=NULL ) r->height = gt->hsb->g.r.y+gt->hsb->g.r.height-g->r.y; return( r ); } static void gtextfield_setvisible(GGadget *g, int visible ) { GTextField *gt = (GTextField *) g; if ( gt->vsb!=NULL ) _ggadget_setvisible(&gt->vsb->g,visible); if ( gt->hsb!=NULL ) _ggadget_setvisible(&gt->hsb->g,visible); _ggadget_setvisible(g,visible); } static void gtextfield_setenabled(GGadget *g, int enabled ) { GTextField *gt = (GTextField *) g; if ( gt->vsb!=NULL ) _ggadget_setenabled(&gt->vsb->g,enabled); if ( gt->hsb!=NULL ) _ggadget_setenabled(&gt->hsb->g,enabled); _ggadget_setenabled(g,enabled); } static int gtextfield_vscroll(GGadget *g, GEvent *event) { enum sb sbt = event->u.control.u.sb.type; GTextField *gt = (GTextField *) (g->data); int loff = gt->loff_top; g = (GGadget *) gt; if ( sbt==et_sb_top ) loff = 0; else if ( sbt==et_sb_bottom ) { loff = gt->lcnt - gt->g.inner.height/gt->fh; } else if ( sbt==et_sb_up ) { if ( gt->loff_top!=0 ) loff = gt->loff_top-1; else loff = 0; } else if ( sbt==et_sb_down ) { if ( gt->loff_top + gt->g.inner.height/gt->fh >= gt->lcnt ) loff = gt->lcnt - gt->g.inner.height/gt->fh; else ++loff; } else if ( sbt==et_sb_uppage ) { int page = g->inner.height/gt->fh- (g->inner.height/gt->fh>2?1:0); loff = gt->loff_top - page; if ( loff<0 ) loff=0; } else if ( sbt==et_sb_downpage ) { int page = g->inner.height/gt->fh- (g->inner.height/gt->fh>2?1:0); loff = gt->loff_top + page; if ( loff + gt->g.inner.height/gt->fh >= gt->lcnt ) loff = gt->lcnt - gt->g.inner.height/gt->fh; } else /* if ( sbt==et_sb_thumb || sbt==et_sb_thumbrelease ) */ { loff = event->u.control.u.sb.pos; } if ( loff + gt->g.inner.height/gt->fh >= gt->lcnt ) loff = gt->lcnt - gt->g.inner.height/gt->fh; if ( loff<0 ) loff = 0; if ( loff!=gt->loff_top ) { gt->loff_top = loff; GScrollBarSetPos(&gt->vsb->g,loff); _ggadget_redraw(&gt->g); } return( true ); } static int gtextfield_hscroll(GGadget *g, GEvent *event) { enum sb sbt = event->u.control.u.sb.type; GTextField *gt = (GTextField *) (g->data); int xoff = gt->xoff_left; g = (GGadget *) gt; if ( sbt==et_sb_top ) xoff = 0; else if ( sbt==et_sb_bottom ) { xoff = gt->xmax - gt->g.inner.width; if ( xoff<0 ) xoff = 0; } else if ( sbt==et_sb_up ) { if ( gt->xoff_left>gt->nw ) xoff = gt->xoff_left-gt->nw; else xoff = 0; } else if ( sbt==et_sb_down ) { if ( gt->xoff_left + gt->nw + gt->g.inner.width >= gt->xmax ) xoff = gt->xmax - gt->g.inner.width; else xoff += gt->nw; } else if ( sbt==et_sb_uppage ) { int page = (3*g->inner.width)/4; xoff = gt->xoff_left - page; if ( xoff<0 ) xoff=0; } else if ( sbt==et_sb_downpage ) { int page = (3*g->inner.width)/4; xoff = gt->xoff_left + page; if ( xoff + gt->g.inner.width >= gt->xmax ) xoff = gt->xmax - gt->g.inner.width; } else /* if ( sbt==et_sb_thumb || 
sbt==et_sb_thumbrelease ) */ { xoff = event->u.control.u.sb.pos; } if ( xoff + gt->g.inner.width >= gt->xmax ) xoff = gt->xmax - gt->g.inner.width; if ( xoff<0 ) xoff = 0; if ( gt->xoff_left!=xoff ) { gt->xoff_left = xoff; GScrollBarSetPos(&gt->hsb->g,xoff); _ggadget_redraw(&gt->g); } return( true ); } static void GTextFieldSetDesiredSize(GGadget *g,GRect *outer,GRect *inner) { GTextField *gt = (GTextField *) g; if ( outer!=NULL ) { g->desired_width = outer->width; g->desired_height = outer->height; } else if ( inner!=NULL ) { int bp = GBoxBorderWidth(g->base,g->box); int extra=0; if ( gt->listfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize) + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&_GListMark_Box) + GBoxBorderWidth(gt->g.base,&glistfieldmenu_box); } else if ( gt->numericfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize)/2 + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&gnumericfieldspinner_box); } g->desired_width = inner->width + 2*bp + extra; g->desired_height = inner->height + 2*bp; if ( gt->multi_line ) { int sbadd = GDrawPointsToPixels(gt->g.base,_GScrollBar_Width) + GDrawPointsToPixels(gt->g.base,1); g->desired_width += sbadd; if ( !gt->wrap ) g->desired_height += sbadd; } } } static void GTextFieldGetDesiredSize(GGadget *g,GRect *outer,GRect *inner) { GTextField *gt = (GTextField *) g; int width=0, height; int extra=0; int bp = GBoxBorderWidth(g->base,g->box); if ( gt->listfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize) + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&_GListMark_Box) + GBoxBorderWidth(gt->g.base,&glistfieldmenu_box); } else if ( gt->numericfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize)/2 + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&gnumericfieldspinner_box); } width = GGadgetScale(GDrawPointsToPixels(gt->g.base,80)); height = gt->multi_line? 
4*gt->fh:gt->fh; if ( g->desired_width>extra+2*bp ) width = g->desired_width - extra - 2*bp; if ( g->desired_height>2*bp ) height = g->desired_height - 2*bp; if ( gt->multi_line ) { int sbadd = GDrawPointsToPixels(gt->g.base,_GScrollBar_Width) + GDrawPointsToPixels(gt->g.base,1); width += sbadd; if ( !gt->wrap ) height += sbadd; } if ( inner!=NULL ) { inner->x = inner->y = 0; inner->width = width; inner->height = height; } if ( outer!=NULL ) { outer->x = outer->y = 0; outer->width = width + extra + 2*bp; outer->height = height + 2*bp; } } static int gtextfield_FillsWindow(GGadget *g) { return( ((GTextField *) g)->multi_line && g->prev==NULL && (_GWidgetGetGadgets(g->base)==g || _GWidgetGetGadgets(g->base)==(GGadget *) ((GTextField *) g)->vsb || _GWidgetGetGadgets(g->base)==(GGadget *) ((GTextField *) g)->hsb )); } struct gfuncs gtextfield_funcs = { 0, sizeof(struct gfuncs), gtextfield_expose, gtextfield_mouse, gtextfield_key, _gtextfield_editcmd, gtextfield_focus, gtextfield_timer, gtextfield_sel, gtextfield_redraw, gtextfield_move, gtextfield_resize, gtextfield_setvisible, gtextfield_setenabled, gtextfield_getsize, _ggadget_getinnersize, gtextfield_destroy, GTextFieldSetTitle, _GTextFieldGetTitle, NULL, NULL, NULL, GTextFieldSetFont, GTextFieldGetFont, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, GTextFieldGetDesiredSize, GTextFieldSetDesiredSize, gtextfield_FillsWindow, NULL }; struct gfuncs glistfield_funcs = { 0, sizeof(struct gfuncs), gtextfield_expose, gtextfield_mouse, gtextfield_key, gtextfield_editcmd, gtextfield_focus, gtextfield_timer, gtextfield_sel, gtextfield_redraw, gtextfield_move, gtextfield_resize, gtextfield_setvisible, gtextfield_setenabled, gtextfield_getsize, _ggadget_getinnersize, gtextfield_destroy, GTextFieldSetTitle, _GTextFieldGetTitle, NULL, NULL, NULL, GTextFieldSetFont, GTextFieldGetFont, GListFClear, GListFSet, GListFGet, GListFGetItem, NULL, GListFSelectOne, GListFIsSelected, GListFGetFirst, NULL, NULL, NULL, GTextFieldGetDesiredSize, GTextFieldSetDesiredSize, NULL, NULL }; static void GTextFieldInit() { FontRequest rq; memset(&rq,0,sizeof(rq)); GGadgetInit(); GDrawDecomposeFont(_ggadget_default_font,&rq); rq.family_name = NULL; rq.utf8_family_name = MONO_UI_FAMILIES; _gtextfield_font = GDrawInstanciateFont(NULL,&rq); _GGadgetCopyDefaultBox(&_GGadget_gtextfield_box); _GGadget_gtextfield_box.padding = 3; /*_GGadget_gtextfield_box.flags = box_active_border_inner;*/ _gtextfield_font = _GGadgetInitDefaultBox("GTextField.",&_GGadget_gtextfield_box,_gtextfield_font); glistfield_box = _GGadget_gtextfield_box; _GGadgetInitDefaultBox("GComboBox.",&glistfield_box,_gtextfield_font); glistfieldmenu_box = glistfield_box; glistfieldmenu_box.padding = 1; _GGadgetInitDefaultBox("GComboBoxMenu.",&glistfieldmenu_box,_gtextfield_font); gnumericfield_box = _GGadget_gtextfield_box; _GGadgetInitDefaultBox("GNumericField.",&gnumericfield_box,_gtextfield_font); gnumericfieldspinner_box = gnumericfield_box; gnumericfieldspinner_box.border_type = bt_none; gnumericfieldspinner_box.border_width = 0; gnumericfieldspinner_box.padding = 0; _GGadgetInitDefaultBox("GNumericFieldSpinner.",&gnumericfieldspinner_box,_gtextfield_font); gtextfield_inited = true; } static void GTextFieldAddVSb(GTextField *gt) { GGadgetData gd; memset(&gd,'\0',sizeof(gd)); gd.pos.y = gt->g.r.y; gd.pos.height = gt->g.r.height; gd.pos.width = GDrawPointsToPixels(gt->g.base,_GScrollBar_Width); gd.pos.x = gt->g.r.x+gt->g.r.width - gd.pos.width; gd.flags = 
(gt->g.state==gs_invisible?0:gg_visible)|gg_enabled|gg_pos_in_pixels|gg_sb_vert; gd.handle_controlevent = gtextfield_vscroll; gt->vsb = (GScrollBar *) GScrollBarCreate(gt->g.base,&gd,gt); gt->vsb->g.contained = true; gd.pos.width += GDrawPointsToPixels(gt->g.base,1); gt->g.r.width -= gd.pos.width; gt->g.inner.width -= gd.pos.width; } static void GTextFieldAddHSb(GTextField *gt) { GGadgetData gd; memset(&gd,'\0',sizeof(gd)); gd.pos.x = gt->g.r.x; gd.pos.width = gt->g.r.width; gd.pos.height = GDrawPointsToPixels(gt->g.base,_GScrollBar_Width); gd.pos.y = gt->g.r.y+gt->g.r.height - gd.pos.height; gd.flags = (gt->g.state==gs_invisible?0:gg_visible)|gg_enabled|gg_pos_in_pixels; gd.handle_controlevent = gtextfield_hscroll; gt->hsb = (GScrollBar *) GScrollBarCreate(gt->g.base,&gd,gt); gt->hsb->g.contained = true; gd.pos.height += GDrawPointsToPixels(gt->g.base,1); gt->g.r.height -= gd.pos.height; gt->g.inner.height -= gd.pos.height; if ( gt->vsb!=NULL ) { gt->vsb->g.r.height -= gd.pos.height; gt->vsb->g.inner.height -= gd.pos.height; } } static void GTextFieldFit(GTextField *gt) { GTextBounds bounds; int as=0, ds, ld, width=0; GRect inner, outer; int bp = GBoxBorderWidth(gt->g.base,gt->g.box); { FontInstance *old = GDrawSetFont(gt->g.base,gt->font); FontRequest rq; int tries; for ( tries = 0; tries<2; ++tries ) { width = GDrawGetTextBounds(gt->g.base,gt->text, -1, &bounds); GDrawWindowFontMetrics(gt->g.base,gt->font,&as, &ds, &ld); if ( gt->g.r.height==0 || as+ds-3+2*bp<=gt->g.r.height || tries==1 ) break; /* Doesn't fit. Try a smaller size */ GDrawDecomposeFont(gt->font,&rq); --rq.point_size; gt->font = GDrawInstanciateFont(gt->g.base,&rq); } gt->fh = as+ds; gt->as = as; gt->nw = GDrawGetTextWidth(gt->g.base,nstr, 1); GDrawSetFont(gt->g.base,old); } GTextFieldGetDesiredSize(&gt->g,&outer,&inner); if ( gt->g.r.width==0 ) { int extra=0; if ( gt->listfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize) + 2*GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + GBoxBorderWidth(gt->g.base,&_GListMark_Box); } else if ( gt->numericfield ) { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize)/2 + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&gnumericfieldspinner_box); } gt->g.r.width = outer.width; gt->g.inner.width = inner.width; gt->g.inner.x = gt->g.r.x + (outer.width-inner.width-extra)/2; } else { gt->g.inner.x = gt->g.r.x + bp; gt->g.inner.width = gt->g.r.width - 2*bp; } if ( gt->g.r.height==0 ) { gt->g.r.height = outer.height; gt->g.inner.height = inner.height; gt->g.inner.y = gt->g.r.y + (outer.height-gt->g.inner.height)/2; } else { gt->g.inner.y = gt->g.r.y + bp; gt->g.inner.height = gt->g.r.height - 2*bp; } if ( gt->multi_line ) { GTextFieldAddVSb(gt); if ( !gt->wrap ) GTextFieldAddHSb(gt); } if ( gt->listfield || gt->numericfield ) { GListField *ge = (GListField *) gt; int extra; if ( gt->listfield ) extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize) + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&_GListMark_Box)+ GBoxBorderWidth(gt->g.base,&glistfieldmenu_box); else { extra = GDrawPointsToPixels(gt->g.base,_GListMarkSize)/2 + GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip) + 2*GBoxBorderWidth(gt->g.base,&gnumericfieldspinner_box); } ge->fieldrect = ge->buttonrect = gt->g.r; ge->fieldrect.width -= extra; extra -= GDrawPointsToPixels(gt->g.base,_GGadget_TextImageSkip)/2; ge->buttonrect.x = ge->buttonrect.x+ge->buttonrect.width-extra; ge->buttonrect.width = extra; if ( gt->numericfield ) 
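/* The numeric field's spinner box is drawn without a border of its own */
/*  (bt_none, see GTextFieldInit), so give the text area one more pixel. */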
++ge->fieldrect.width; } } static GTextField *_GTextFieldCreate(GTextField *gt, struct gwindow *base, GGadgetData *gd,void *data, GBox *def) { if ( !gtextfield_inited ) GTextFieldInit(); gt->g.funcs = &gtextfield_funcs; _GGadget_Create(&gt->g,base,gd,data,def); gt->g.takes_input = true; gt->g.takes_keyboard = true; gt->g.focusable = true; if ( gd->label!=NULL ) { if ( gd->label->text_is_1byte ) gt->text = /* def2u_*/ utf82u_copy((char *) gd->label->text); else if ( gd->label->text_in_resource ) gt->text = u_copy((unichar_t *) GStringGetResource((intpt) gd->label->text,&gt->g.mnemonic)); else gt->text = u_copy(gd->label->text); gt->sel_start = gt->sel_end = gt->sel_base = u_strlen(gt->text); } if ( gt->text==NULL ) gt->text = calloc(1,sizeof(unichar_t)); gt->font = _gtextfield_font; if ( gd->label!=NULL && gd->label->font!=NULL ) gt->font = gd->label->font; if ( (gd->flags & gg_textarea_wrap) && gt->multi_line ) gt->wrap = true; else if ( (gd->flags & gg_textarea_wrap) ) /* only used by gchardlg.c no need to make it look nice */ gt->donthook = true; GTextFieldFit(gt); _GGadget_FinalPosition(&gt->g,base,gd); GTextFieldRefigureLines(gt,0); if ( gd->flags & gg_group_end ) _GGadgetCloseGroup(&gt->g); GWidgetIndicateFocusGadget(&gt->g); if ( gd->flags & gg_text_xim ) gt->gic = GWidgetCreateInputContext(base,gic_overspot|gic_orlesser); return( gt ); } GGadget *GTextFieldCreate(struct gwindow *base, GGadgetData *gd,void *data) { GTextField *gt = _GTextFieldCreate(calloc(1,sizeof(GTextField)),base,gd,data,&_GGadget_gtextfield_box); return( &gt->g ); } GGadget *GPasswordCreate(struct gwindow *base, GGadgetData *gd,void *data) { GTextField *gt = _GTextFieldCreate(calloc(1,sizeof(GTextField)),base,gd,data,&_GGadget_gtextfield_box); gt->password = true; GTextFieldRefigureLines(gt, 0); return( &gt->g ); } GGadget *GNumericFieldCreate(struct gwindow *base, GGadgetData *gd,void *data) { GTextField *gt = calloc(1,sizeof(GNumericField)); gt->numericfield = true; _GTextFieldCreate(gt,base,gd,data,&gnumericfield_box); return( &gt->g ); } GGadget *GTextCompletionCreate(struct gwindow *base, GGadgetData *gd,void *data) { GTextField *gt = calloc(1,sizeof(GCompletionField)); gt->accepts_tabs = true; gt->completionfield = true; gt->was_completing = true; ((GCompletionField *) gt)->completion = gd->u.completion; _GTextFieldCreate(gt,base,gd,data,&_GGadget_gtextfield_box); gt->accepts_tabs = ((GCompletionField *) gt)->completion != NULL; return( &gt->g ); } GGadget *GTextAreaCreate(struct gwindow *base, GGadgetData *gd,void *data) { GTextField *gt = calloc(1,sizeof(GTextField)); gt->multi_line = true; gt->accepts_returns = true; _GTextFieldCreate(gt,base,gd,data,&_GGadget_gtextfield_box); return( &gt->g ); } static void GListFieldSelected(GGadget *g, int i) { GListField *ge = (GListField *) g; ge->popup = NULL; _GWidget_ClearGrabGadget(&ge->gt.g); if ( i<0 || i>=ge->ltot || ge->ti[i]->text==NULL ) return; GTextFieldSetTitle(g,ge->ti[i]->text); _ggadget_redraw(g); GTextFieldChanged(&ge->gt,i); } GGadget *GSimpleListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data) { GListField *ge = calloc(1,sizeof(GListField)); ge->gt.listfield = true; if ( gd->u.list!=NULL ) ge->ti = GTextInfoArrayFromList(gd->u.list,&ge->ltot); _GTextFieldCreate(&ge->gt,base,gd,data,&glistfield_box); ge->gt.g.funcs = &glistfield_funcs; return( &ge->gt.g ); } static unichar_t **GListField_NameCompletion(GGadget *t,int from_tab) { const unichar_t *spt; unichar_t **ret; GTextInfo **ti; int32 len; int i, cnt, doit, match_len; spt = 
_GGadgetGetTitle(t); if ( spt==NULL ) return( NULL ); match_len = u_strlen(spt); ti = GGadgetGetList(t,&len); ret = NULL; for ( doit=0; doit<2; ++doit ) { cnt=0; for ( i=0; i<len; ++i ) { if ( ti[i]->text && u_strncmp(ti[i]->text,spt,match_len)==0 ) { if ( doit ) ret[cnt] = u_copy(ti[i]->text); ++cnt; } } if ( doit ) ret[cnt] = NULL; else if ( cnt==0 ) return( NULL ); else ret = malloc((cnt+1)*sizeof(unichar_t *)); } return( ret ); } GGadget *GListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data) { GListField *ge = calloc(1,sizeof(GCompletionField)); ge->gt.listfield = true; if ( gd->u.list!=NULL ) ge->ti = GTextInfoArrayFromList(gd->u.list,&ge->ltot); ge->gt.accepts_tabs = true; ge->gt.completionfield = true; /* ge->gt.was_completing = true; */ ((GCompletionField *) ge)->completion = GListField_NameCompletion; _GTextFieldCreate(&ge->gt,base,gd,data,&_GGadget_gtextfield_box); ge->gt.g.funcs = &glistfield_funcs; return( &ge->gt.g ); } /* ************************************************************************** */ /* ***************************** text completion **************************** */ /* ************************************************************************** */ static void GCompletionDestroy(GCompletionField *gc) { int i; if ( gc->choice_popup!=NULL ) { GWindow cp = gc->choice_popup; gc->choice_popup = NULL; GDrawSetUserData(cp,NULL); GDrawDestroyWindow(cp); } if ( gc->choices!=NULL ) { for ( i=0; gc->choices[i]!=NULL; ++i ) free(gc->choices[i]); free(gc->choices); gc->choices = NULL; } } static int GTextFieldSetTitleRmDotDotDot(GGadget *g,unichar_t *tit) { unichar_t *pt = uc_strstr(tit," ..."); if ( pt!=NULL ) *pt = '\0'; GTextFieldSetTitle(g,tit); if ( pt!=NULL ) *pt = ' '; return( pt!=NULL ); } static int popup_eh(GWindow popup,GEvent *event) { GGadget *owner = GDrawGetUserData(popup); GTextField *gt = (GTextField *) owner; GCompletionField *gc = (GCompletionField *) owner; GRect old1, r; Color fg; int i, bp; if ( owner==NULL ) /* dying */ return( true ); bp = GBoxBorderWidth(owner->base,owner->box); if ( event->type == et_expose ) { GDrawPushClip(popup,&event->u.expose.rect,&old1); GDrawSetFont(popup,gt->font); GBoxDrawBackground(popup,&event->u.expose.rect,owner->box, owner->state,false); GDrawGetSize(popup,&r); r.x = r.y = 0; GBoxDrawBorder(popup,&r,owner->box,owner->state,false); r.x += bp; r.width -= 2*bp; fg = owner->box->main_foreground==COLOR_DEFAULT?GDrawGetDefaultForeground(GDrawGetDisplayOfWindow(popup)): owner->box->main_foreground; for ( i=0; gc->choices[i]!=NULL; ++i ) { if ( i==gc->selected ) { r.y = i*gt->fh+bp; r.height = gt->fh; GDrawFillRect(popup,&r,owner->box->active_border); } GDrawDrawText(popup,bp,i*gt->fh+gt->as+bp,gc->choices[i],-1,fg); } GDrawPopClip(popup,&old1); } else if ( event->type == et_mouseup ) { gc->selected = (event->u.mouse.y-bp)/gt->fh; if ( gc->selected>=0 && gc->selected<gc->ctot ) { int tryagain = GTextFieldSetTitleRmDotDotDot(owner,gc->choices[gc->selected]); GTextFieldChanged(gt,-1); GCompletionDestroy(gc); if ( tryagain ) GTextFieldComplete(gt,false); } else { gc->selected = -1; GDrawRequestExpose(popup,NULL,false); } } else if ( event->type == et_char ) { return( gtextfield_key(owner,event)); } return( true ); } static void GCompletionCreatePopup(GCompletionField *gc) { int width, maxw, i; GWindowAttrs pattrs; GWindow base = gc->gl.gt.g.base; GDisplay *disp = GDrawGetDisplayOfWindow(base); GWindow root = GDrawGetRoot(disp); int bp = GBoxBorderWidth(base,gc->gl.gt.g.box); GRect pos, screen; GPoint pt; 
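/* Size the popup to fit the widest choice and place it just below the */
/*  textfield. If that would run off the bottom of the screen, try above */
/*  the field, then to its right, and as a last resort the top left corner. */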
GDrawSetFont(base,gc->gl.gt.font); maxw = 0; for ( i=0; i<gc->ctot; ++i ) { width = GDrawGetTextWidth(base,gc->choices[i],-1); if ( width > maxw ) maxw = width; } maxw += 2*bp; pos.width = maxw; pos.height = gc->gl.gt.fh*gc->ctot+2*bp; if ( pos.width < gc->gl.gt.g.r.width ) pos.width = gc->gl.gt.g.r.width; pattrs.mask = wam_events|wam_nodecor|wam_positioned|wam_cursor| wam_transient|wam_verytransient/*|wam_bordwidth|wam_bordcol*/; pattrs.event_masks = -1; pattrs.nodecoration = true; pattrs.positioned = true; pattrs.cursor = ct_pointer; pattrs.transient = GWidgetGetTopWidget(base); pattrs.border_width = 1; pattrs.border_color = gc->gl.gt.g.box->main_foreground; GDrawGetSize(root,&screen); pt.x = gc->gl.gt.g.r.x; pt.y = gc->gl.gt.g.r.y + gc->gl.gt.g.r.height; GDrawTranslateCoordinates(base,root,&pt); if ( pt.y+pos.height > screen.height ) { if ( pt.y-gc->gl.gt.g.r.height-pos.height>=0 ) { /* Is there more room above the widget ?? */ pt.y -= gc->gl.gt.g.r.height; pt.y -= pos.height; } else if ( pt.x + gc->gl.gt.g.r.width + maxw <= screen.width ) { pt.x += gc->gl.gt.g.r.width; pt.y = 0; } else pt.x = pt.y = 0; } pos.x = pt.x; pos.y = pt.y; gc->choice_popup = GWidgetCreateTopWindow(disp,&pos,popup_eh,gc,&pattrs); GDrawSetGIC(gc->choice_popup,GWidgetCreateInputContext(gc->choice_popup,gic_overspot|gic_orlesser), gc->gl.gt.g.inner.x,gc->gl.gt.g.inner.y+gc->gl.gt.as); GDrawSetVisible(gc->choice_popup,true); /* Don't grab this one. User should be free to ignore it */ } static int ucmp(const void *_s1, const void *_s2) { return( u_strcmp(*(const unichar_t **)_s1,*(const unichar_t **)_s2)); } #define MAXLINES 30 /* Maximum # entries allowed in popup window */ #define MAXBRACKETS 30 /* Maximum # chars allowed in [] pairs */ static void GTextFieldComplete(GTextField *gt,int from_tab) { GCompletionField *gc = (GCompletionField *) gt; unichar_t **ret; int i, len, orig_len; unichar_t *pt1, *pt2, ch; /* If not from_tab, then the textfield has already been changed and we */ /* must mark it as such (but don't mark twice) */ ret = (gc->completion)(&gt->g,from_tab); if ( ret==NULL || ret[0]==NULL ) { if ( from_tab ) GDrawBeep(NULL); else GTextFieldChanged(gt,-1); free(ret); } else { orig_len = u_strlen(gt->text); len = u_strlen(ret[0]); for ( i=1; ret[i]!=NULL; ++i ) { for ( pt1=ret[0], pt2=ret[i]; *pt1==*pt2 && pt1-ret[0]<len ; ++pt1, ++pt2 ); len = pt1-ret[0]; } if ( orig_len!=len ) { ch = ret[0][len]; ret[0][len] = '\0'; GTextFieldSetTitle(&gt->g,ret[0]); ret[0][len] = ch; if ( !from_tab ) GTextFieldSelect(&gt->g,orig_len,len); GTextFieldChanged(gt,-1); } else if ( !from_tab ) GTextFieldChanged(gt,-1); if ( ret[1]!=NULL ) { gc->choices = ret; gc->selected = -1; if ( from_tab ) GDrawBeep(NULL); qsort(ret,i,sizeof(unichar_t *),ucmp); gc->ctot = i; if ( i>=MAXLINES ) { /* Try to shrink the list by just showing initial stubs of the */ /* names with multiple entries with a common next character */ /* So if we have matched against "a" and we have "abc", "abd" "acc" */ /* the show "ab..." 
and "acc" */ unichar_t **ret2=NULL, last_ch = -1; int cnt, doit, type2=false; for ( doit=0; doit<2; ++doit ) { for ( i=cnt=0; ret[i]!=NULL; ++i ) { if ( last_ch!=ret[i][len] ) { if ( doit && type2 ) { int c2 = cnt/MAXBRACKETS, c3 = cnt%MAXBRACKETS; if ( ret[i][len]=='\0' ) continue; if ( c3==0 ) { ret2[c2] = calloc((len+MAXBRACKETS+2+4+1),sizeof(unichar_t)); memcpy(ret2[c2],ret[i],len*sizeof(unichar_t)); ret2[c2][len] = '['; } ret2[c2][len+1+c3] = ret[i][len]; uc_strcpy(ret2[c2]+len+2+c3,"] ..."); } else if ( doit ) { ret2[cnt] = malloc((u_strlen(ret[i])+5)*sizeof(unichar_t)); u_strcpy(ret2[cnt],ret[i]); } ++cnt; last_ch = ret[i][len]; } else if ( doit && !type2 ) { int j; for ( j=len+1; ret[i][j]!='\0' && ret[i][j] == ret2[cnt-1][j]; ++j ); uc_strcpy(ret2[cnt-1]+j," ..."); } } if ( cnt>=MAXLINES*MAXBRACKETS ) break; if ( cnt>=MAXLINES && !doit ) { type2 = (cnt+MAXBRACKETS-1)/MAXBRACKETS; ret2 = malloc((type2+1)*sizeof(unichar_t *)); } else if ( !doit ) ret2 = malloc((cnt+1)*sizeof(unichar_t *)); else { if ( type2 ) cnt = type2; ret2[cnt] = NULL; } } if ( ret2!=NULL ) { for ( i=0; ret[i]!=NULL; ++i ) free(ret[i]); free(ret); ret = gc->choices = ret2; i = gc->ctot = cnt; } } if ( gc->ctot>=MAXLINES ) { /* Too many choices. Don't popup a list of them */ gc->choices = NULL; for ( i=0; ret[i]!=NULL; ++i ) free(ret[i]); free(ret); } else { gc->ctot = i; GCompletionCreatePopup(gc); } } else { free(ret[1]); free(ret); } } } static int GCompletionHandleKey(GTextField *gt,GEvent *event) { GCompletionField *gc = (GCompletionField *) gt; int dir = 0; if ( gc->choice_popup==NULL || event->type == et_charup ) return( false ); if ( event->u.chr.keysym == GK_Up || event->u.chr.keysym == GK_KP_Up ) dir = -1; else if ( event->u.chr.keysym == GK_Down || event->u.chr.keysym == GK_KP_Down ) dir = 1; if ( dir==0 || event->u.chr.chars[0]!='\0' ) { /* For normal characters we destroy the popup window and pretend it */ /* wasn't there */ GCompletionDestroy(gc); if ( event->u.chr.keysym == GK_Escape ) gt->was_completing = false; return( event->u.chr.keysym == GK_Escape || /* Eat an escape, other chars will be processed further */ event->u.chr.keysym == GK_Return ); } if (( gc->selected==-1 && dir==-1 ) || ( gc->selected==gc->ctot-1 && dir==1 )) return( true ); gc->selected += dir; if ( gc->selected!=-1 ) GTextFieldSetTitleRmDotDotDot(&gt->g,gc->choices[gc->selected]); GTextFieldChanged(gt,-1); GDrawRequestExpose(gc->choice_popup,NULL,false); return( true ); } void GCompletionFieldSetCompletion(GGadget *g,GTextCompletionHandler completion) { ((GCompletionField *) g)->completion = completion; ((GTextField *) g)->accepts_tabs = ((GCompletionField *) g)->completion != NULL; } void GCompletionFieldSetCompletionMode(GGadget *g,int enabled) { ((GTextField *) g)->was_completing = enabled; } GResInfo *_GTextFieldRIHead(void) { if ( !gtextfield_inited ) GTextFieldInit(); return( &gtextfield_ri ); }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Quadrature Mirror-Image Filter Bank (QMFB) Library * * $Id$ */ /******************************************************************************\ * \******************************************************************************/ #undef WT_LENONE /* This is not needed due to normalization. */ #define WT_DOSCALE /******************************************************************************\ * Includes. 
\******************************************************************************/ #include <assert.h> #include "jasper/jas_fix.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" #include "jpc_qmfb.h" #include "jpc_tsfb.h" #include "jpc_math.h" /******************************************************************************\ * \******************************************************************************/ #define QMFB_SPLITBUFSIZE 4096 #define QMFB_JOINBUFSIZE 4096 int jpc_ft_analyze(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride); int jpc_ft_synthesize(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride); int jpc_ns_analyze(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride); int jpc_ns_synthesize(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride); void jpc_ft_fwdlift_row(jpc_fix_t *a, int numcols, int parity); void jpc_ft_fwdlift_col(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ft_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ft_fwdlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); void jpc_ft_invlift_row(jpc_fix_t *a, int numcols, int parity); void jpc_ft_invlift_col(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ft_invlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ft_invlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); void jpc_ns_fwdlift_row(jpc_fix_t *a, int numcols, int parity); void jpc_ns_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ns_fwdlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); void jpc_ns_invlift_row(jpc_fix_t *a, int numcols, int parity); void jpc_ns_invlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_ns_invlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); void jpc_qmfb_split_row(jpc_fix_t *a, int numcols, int parity); void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_qmfb_split_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); void jpc_qmfb_join_row(jpc_fix_t *a, int numcols, int parity); void jpc_qmfb_join_col(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_qmfb_join_colgrp(jpc_fix_t *a, int numrows, int stride, int parity); void jpc_qmfb_join_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity); double jpc_ft_lpenergywts[32] = { 1.2247448713915889, 1.6583123951776999, 2.3184046238739260, 3.2691742076555053, 4.6199296531440819, 6.5323713152269596, 9.2377452606141937, 13.0639951297449581, 18.4752262333915667, 26.1278968190610392, 36.9504194305524791, 52.2557819580462777, 73.9008347315741645, 104.5115624560829133, 147.8016689469569656, 209.0231247296646018, 295.6033378293900000, 418.0462494347059419, 591.2066756503630813, 836.0924988714708661, /* approximations */ 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661, 836.0924988714708661 }; double jpc_ft_hpenergywts[32] = { 0.8477912478906585, 0.9601432184835760, 1.2593401049756179, 1.7444107171191079, 2.4538713036750726, 3.4656517695088755, 4.8995276398597856, 6.9283970402160842, 9.7980274940131444, 13.8564306871112652, 
19.5959265076535587, 27.7128159494245487, 39.1918369552045860, 55.4256262207444053, 78.3836719028959124, 110.8512517317256822, 156.7673435548526868, 221.7025033739244293, 313.5346870787551552, 443.4050067351659550, /* approximations */ 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550, 443.4050067351659550 }; double jpc_ns_lpenergywts[32] = { 1.4021081679297411, 2.0303718560817923, 2.9011625562785555, 4.1152851751758002, 5.8245108637728071, 8.2387599345725171, 11.6519546479210838, 16.4785606470644375, 23.3042776444606794, 32.9572515613740435, 46.6086013487782793, 65.9145194076860861, 93.2172084551803977, 131.8290408510004283, 186.4344176300625691, 263.6580819564562148, 372.8688353500955373, 527.3161639447193920, 745.7376707114038936, 1054.6323278917823245, /* approximations follow */ 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245, 1054.6323278917823245 }; double jpc_ns_hpenergywts[32] = { 1.4425227650161456, 1.9669426082455688, 2.8839248082788891, 4.1475208393432981, 5.8946497530677817, 8.3471789178590949, 11.8086046551047463, 16.7012780415647804, 23.6196657032246620, 33.4034255108592362, 47.2396388881632632, 66.8069597416714061, 94.4793162154500692, 133.6139330736999113, 188.9586372358249378, 267.2278678461869390, 377.9172750722391356, 534.4557359047058753, 755.8345502191498326, 1068.9114718353569060, /* approximations follow */ 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060, 1068.9114718353569060 }; jpc_qmfb2d_t jpc_ft_qmfb2d = { jpc_ft_analyze, jpc_ft_synthesize, jpc_ft_lpenergywts, jpc_ft_hpenergywts }; jpc_qmfb2d_t jpc_ns_qmfb2d = { jpc_ns_analyze, jpc_ns_synthesize, jpc_ns_lpenergywts, jpc_ns_hpenergywts }; /******************************************************************************\ * generic \******************************************************************************/ void jpc_qmfb_split_row(jpc_fix_t *a, int numcols, int parity) { int bufsize = JPC_CEILDIVPOW2(numcols, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartcol; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numcols >= 2) { hstartcol = (numcols + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numcols - hstartcol); m = numcols - hstartcol; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[1 - parity]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += 2; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[1 - parity]; srcptr = &a[2 - parity]; n = numcols - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += 2; } /* Copy the saved samples into the highpass channel. 
*/ dstptr = &a[hstartcol]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; ++srcptr; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } } void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartrow; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartrow = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartrow : (numrows - hstartrow); m = numrows - hstartrow; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartrow * stride]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; ++srcptr; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } } void jpc_qmfb_split_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = splitbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int m; int hstartrow; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc3(bufsize, JPC_QMFB_COLGRPSIZE, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartrow = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartrow : (numrows - hstartrow); m = numrows - hstartrow; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += JPC_QMFB_COLGRPSIZE; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartrow * stride]; srcptr = buf; n = m; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += JPC_QMFB_COLGRPSIZE; } } /* If the split buffer was allocated on the heap, free this memory. 
*/ if (buf != splitbuf) { jas_free(buf); } } void jpc_qmfb_split_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = splitbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int m; int hstartcol; /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_alloc3(bufsize, numcols, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } if (numrows >= 2) { hstartcol = (numrows + 1 - parity) >> 1; // ORIGINAL (WRONG): m = (parity) ? hstartcol : (numrows - hstartcol); m = numrows - hstartcol; /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += numcols; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol * stride]; srcptr = buf; n = m; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += numcols; } } /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } } void jpc_qmfb_join_row(jpc_fix_t *a, int numcols, int parity) { int bufsize = JPC_CEILDIVPOW2(numcols, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE]; jpc_fix_t *buf = joinbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numcols + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { *dstptr = *srcptr; ++srcptr; ++dstptr; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol]; dstptr = &a[1 - parity]; n = numcols - hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity]; n = hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } } void jpc_qmfb_join_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE]; jpc_fix_t *buf = joinbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_alloc2(bufsize, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numrows + 1 - parity) >> 1; /* Save the samples from the lowpass channel. 
*/ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { *dstptr = *srcptr; srcptr += stride; ++dstptr; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol * stride]; dstptr = &a[(1 - parity) * stride]; n = numrows - hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2 * stride; srcptr += stride; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity * stride]; n = hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2 * stride; ++srcptr; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } } void jpc_qmfb_join_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = joinbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_alloc3(bufsize, JPC_QMFB_COLGRPSIZE, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numrows + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } srcptr += stride; dstptr += JPC_QMFB_COLGRPSIZE; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol * stride]; dstptr = &a[(1 - parity) * stride]; n = numrows - hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += stride; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity * stride]; n = hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += JPC_QMFB_COLGRPSIZE; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } } void jpc_qmfb_join_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE]; jpc_fix_t *buf = joinbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int hstartcol; /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_alloc3(bufsize, numcols, sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } hstartcol = (numrows + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } srcptr += stride; dstptr += numcols; } /* Copy the samples from the highpass channel into place. 
*/ srcptr = &a[hstartcol * stride]; dstptr = &a[(1 - parity) * stride]; n = numrows - hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += stride; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity * stride]; n = hstartcol; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += 2 * stride; srcptr += numcols; } /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } } /******************************************************************************\ * 5/3 transform \******************************************************************************/ void jpc_ft_fwdlift_row(jpc_fix_t *a, int numcols, int parity) { register jpc_fix_t *lptr; register jpc_fix_t *hptr; register int n; int llen; llen = (numcols + 1 - parity) >> 1; if (numcols > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { hptr[0] -= lptr[0]; ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { //hptr[0] -= (lptr[0] + lptr[1]) >> 1; hptr[0] -= jpc_fix_asr(lptr[0] + lptr[1], 1); ++hptr; ++lptr; } if (parity == (numcols & 1)) { hptr[0] -= lptr[0]; } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { //lptr[0] += (hptr[0] + 1) >> 1; lptr[0] += jpc_fix_asr(hptr[0] + 1, 1); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { //lptr[0] += (hptr[0] + hptr[1] + 2) >> 2; lptr[0] += jpc_fix_asr(hptr[0] + hptr[1] + 2, 2); ++lptr; ++hptr; } if (parity != (numcols & 1)) { //lptr[0] += (hptr[0] + 1) >> 1; lptr[0] += jpc_fix_asr(hptr[0] + 1, 1); } } else { if (parity) { lptr = &a[0]; //lptr[0] <<= 1; lptr[0] = jpc_fix_asl(lptr[0], 1); } } } void jpc_ft_fwdlift_col(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; #if 0 register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int i; #endif register int n; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { hptr[0] -= lptr[0]; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { //hptr[0] -= (lptr[0] + lptr[stride]) >> 1; hptr[0] -= jpc_fix_asr(lptr[0] + lptr[stride], 1); hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { hptr[0] -= lptr[0]; } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { //lptr[0] += (hptr[0] + 1) >> 1; lptr[0] += jpc_fix_asr(hptr[0] + 1, 1); lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { //lptr[0] += (hptr[0] + hptr[stride] + 2) >> 2; lptr[0] += jpc_fix_asr(hptr[0] + hptr[stride] + 2, 2); lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { //lptr[0] += (hptr[0] + 1) >> 1; lptr[0] += jpc_fix_asr(hptr[0] + 1, 1); } } else { if (parity) { lptr = &a[0]; //lptr[0] <<= 1; lptr[0] = jpc_fix_asl(lptr[0], 1); } } } void jpc_ft_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. 
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] -= lptr2[0]; ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //hptr2[0] -= (lptr2[0] + lptr2[stride]) >> 1; hptr2[0] -= jpc_fix_asr(lptr2[0] + lptr2[stride], 1); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] -= lptr2[0]; ++lptr2; ++hptr2; } } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] += (hptr2[0] + 1) >> 1; lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] += (hptr2[0] + hptr2[stride] + 2) >> 2; lptr2[0] += jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] += (hptr2[0] + 1) >> 1; lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } } } else { if (parity) { lptr2 = &a[0]; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] <<= 1; lptr2[0] = jpc_fix_asl(lptr2[0], 1); ++lptr2; } } } } void jpc_ft_fwdlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] -= lptr2[0]; ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //hptr2[0] -= (lptr2[0] + lptr2[stride]) >> 1; hptr2[0] -= jpc_fix_asr(lptr2[0] + lptr2[stride], 1); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] -= lptr2[0]; ++lptr2; ++hptr2; } } /* Apply the second lifting step. 
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] += (hptr2[0] + 1) >> 1; lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] += (hptr2[0] + hptr2[stride] + 2) >> 2; lptr2[0] += jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] += (hptr2[0] + 1) >> 1; lptr2[0] += jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } } } else { if (parity) { lptr2 = &a[0]; for (i = 0; i < numcols; ++i) { //lptr2[0] <<= 1; lptr2[0] = jpc_fix_asl(lptr2[0], 1); ++lptr2; } } } } void jpc_ft_invlift_row(jpc_fix_t *a, int numcols, int parity) { register jpc_fix_t *lptr; register jpc_fix_t *hptr; register int n; int llen; llen = (numcols + 1 - parity) >> 1; if (numcols > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { //lptr[0] -= (hptr[0] + 1) >> 1; lptr[0] -= jpc_fix_asr(hptr[0] + 1, 1); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { //lptr[0] -= (hptr[0] + hptr[1] + 2) >> 2; lptr[0] -= jpc_fix_asr(hptr[0] + hptr[1] + 2, 2); ++lptr; ++hptr; } if (parity != (numcols & 1)) { //lptr[0] -= (hptr[0] + 1) >> 1; lptr[0] -= jpc_fix_asr(hptr[0] + 1, 1); } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { hptr[0] += lptr[0]; ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { //hptr[0] += (lptr[0] + lptr[1]) >> 1; hptr[0] += jpc_fix_asr(lptr[0] + lptr[1], 1); ++hptr; ++lptr; } if (parity == (numcols & 1)) { hptr[0] += lptr[0]; } } else { if (parity) { lptr = &a[0]; //lptr[0] >>= 1; lptr[0] = jpc_fix_asr(lptr[0], 1); } } } void jpc_ft_invlift_col(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; #if 0 register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int i; #endif register int n; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { //lptr[0] -= (hptr[0] + 1) >> 1; lptr[0] -= jpc_fix_asr(hptr[0] + 1, 1); lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { //lptr[0] -= (hptr[0] + hptr[stride] + 2) >> 2; lptr[0] -= jpc_fix_asr(hptr[0] + hptr[stride] + 2, 2); lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { //lptr[0] -= (hptr[0] + 1) >> 1; lptr[0] -= jpc_fix_asr(hptr[0] + 1, 1); } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { hptr[0] += lptr[0]; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { //hptr[0] += (lptr[0] + lptr[stride]) >> 1; hptr[0] += jpc_fix_asr(lptr[0] + lptr[stride], 1); hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { hptr[0] += lptr[0]; } } else { if (parity) { lptr = &a[0]; //lptr[0] >>= 1; lptr[0] = jpc_fix_asr(lptr[0], 1); } } } void jpc_ft_invlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. 
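	   (The inverse transform runs the lifting steps in reverse order
	   with the signs flipped, undoing the update step first.  Since
	   only integer adds and arithmetic shifts are involved, the 5/3
	   reconstruction is bit-exact.)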
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] -= (hptr2[0] + 1) >> 1; lptr2[0] -= jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] -= (hptr2[0] + hptr2[stride] + 2) >> 2; lptr2[0] -= jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] -= (hptr2[0] + 1) >> 1; lptr2[0] -= jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] += lptr2[0]; ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //hptr2[0] += (lptr2[0] + lptr2[stride]) >> 1; hptr2[0] += jpc_fix_asr(lptr2[0] + lptr2[stride], 1); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] += lptr2[0]; ++lptr2; ++hptr2; } } } else { if (parity) { lptr2 = &a[0]; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] >>= 1; lptr2[0] = jpc_fix_asr(lptr2[0], 1); ++lptr2; } } } } void jpc_ft_invlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] -= (hptr2[0] + 1) >> 1; lptr2[0] -= jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] -= (hptr2[0] + hptr2[stride] + 2) >> 2; lptr2[0] -= jpc_fix_asr(hptr2[0] + hptr2[stride] + 2, 2); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //lptr2[0] -= (hptr2[0] + 1) >> 1; lptr2[0] -= jpc_fix_asr(hptr2[0] + 1, 1); ++lptr2; ++hptr2; } } /* Apply the second lifting step. 
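	   (Predict undo: X[2n+1] = H[n] + floor((X[2n] + X[2n+2]) / 2),
	   using the lowpass samples already recovered by the step above.)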
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] += lptr2[0]; ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { //hptr2[0] += (lptr2[0] + lptr2[stride]) >> 1; hptr2[0] += jpc_fix_asr(lptr2[0] + lptr2[stride], 1); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] += lptr2[0]; ++lptr2; ++hptr2; } } } else { if (parity) { lptr2 = &a[0]; for (i = 0; i < numcols; ++i) { //lptr2[0] >>= 1; lptr2[0] = jpc_fix_asr(lptr2[0], 1); ++lptr2; } } } } int jpc_ft_analyze(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride) { int numrows = height; int numcols = width; int rowparity = ystart & 1; int colparity = xstart & 1; int i; jpc_fix_t *startptr; int maxcols; maxcols = (numcols / JPC_QMFB_COLGRPSIZE) * JPC_QMFB_COLGRPSIZE; startptr = &a[0]; for (i = 0; i < maxcols; i += JPC_QMFB_COLGRPSIZE) { jpc_qmfb_split_colgrp(startptr, numrows, stride, rowparity); jpc_ft_fwdlift_colgrp(startptr, numrows, stride, rowparity); startptr += JPC_QMFB_COLGRPSIZE; } if (maxcols < numcols) { jpc_qmfb_split_colres(startptr, numrows, numcols - maxcols, stride, rowparity); jpc_ft_fwdlift_colres(startptr, numrows, numcols - maxcols, stride, rowparity); } startptr = &a[0]; for (i = 0; i < numrows; ++i) { jpc_qmfb_split_row(startptr, numcols, colparity); jpc_ft_fwdlift_row(startptr, numcols, colparity); startptr += stride; } return 0; } int jpc_ft_synthesize(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride) { int numrows = height; int numcols = width; int rowparity = ystart & 1; int colparity = xstart & 1; int maxcols; jpc_fix_t *startptr; int i; startptr = &a[0]; for (i = 0; i < numrows; ++i) { jpc_ft_invlift_row(startptr, numcols, colparity); jpc_qmfb_join_row(startptr, numcols, colparity); startptr += stride; } maxcols = (numcols / JPC_QMFB_COLGRPSIZE) * JPC_QMFB_COLGRPSIZE; startptr = &a[0]; for (i = 0; i < maxcols; i += JPC_QMFB_COLGRPSIZE) { jpc_ft_invlift_colgrp(startptr, numrows, stride, rowparity); jpc_qmfb_join_colgrp(startptr, numrows, stride, rowparity); startptr += JPC_QMFB_COLGRPSIZE; } if (maxcols < numcols) { jpc_ft_invlift_colres(startptr, numrows, numcols - maxcols, stride, rowparity); jpc_qmfb_join_colres(startptr, numrows, numcols - maxcols, stride, rowparity); } return 0; } /******************************************************************************\ * 9/7 transform \******************************************************************************/ #define ALPHA (-1.586134342059924) #define BETA (-0.052980118572961) #define GAMMA (0.882911075530934) #define DELTA (0.443506852043971) #define LGAIN (1.0 / 1.23017410558578) #define HGAIN (1.0 / 1.62578613134411) void jpc_ns_fwdlift_row(jpc_fix_t *a, int numcols, int parity) { register jpc_fix_t *lptr; register jpc_fix_t *hptr; register int n; int llen; llen = (numcols + 1 - parity) >> 1; if (numcols > 1) { /* Apply the first lifting step. 
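	   (The 9/7 transform factors into four lifting passes using the
	   CDF coefficients ALPHA through DELTA defined above, evaluated in
	   fixed point via jpc_dbltofix and jpc_fix_mul.  At the boundaries
	   the symmetric extension makes both neighbours equal, which is
	   why the edge cases fold the neighbour pair into a single
	   2.0 * coefficient multiply.)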
*/ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_pluseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_pluseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); } /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr[0] = jpc_fix_mul(lptr[0], jpc_dbltofix(LGAIN)); ++lptr; } hptr = &a[llen]; n = numcols - llen; while (n-- > 0) { hptr[0] = jpc_fix_mul(hptr[0], jpc_dbltofix(HGAIN)); ++hptr; } #endif } else { #if defined(WT_LENONE) if (parity) { lptr = &a[0]; //lptr[0] <<= 1; lptr[0] = jpc_fix_asl(lptr[0], 1); } #endif } } void jpc_ns_fwdlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the second lifting step. 
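	   (Same lifting as the row case but strided vertically; the inner
	   i loop carries JPC_QMFB_COLGRPSIZE adjacent columns through each
	   step together, so every pass over a row touches consecutive
	   memory.)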
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(LGAIN)); ++lptr2; } lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(HGAIN)); ++hptr2; } hptr += stride; } #endif } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] <<= 1; lptr2[0] = jpc_fix_asl(lptr2[0], 1); ++lptr2; } } #endif } } void jpc_ns_fwdlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. 
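	   (The colres variant is the tail case: it lifts the numcols
	   leftover columns, fewer than JPC_QMFB_COLGRPSIZE, that remain
	   after the full column groups have been processed by the colgrp
	   variant.)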
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the scaling step. 
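	   (Compiled only with WT_DOSCALE: the lowpass band is scaled by
	   LGAIN and the highpass band by HGAIN; the synthesis side divides
	   these gains back out, so the option only fixes the subband
	   normalisation convention.)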
*/ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; for (i = 0; i < numcols; ++i) { lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(LGAIN)); ++lptr2; } lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(HGAIN)); ++hptr2; } hptr += stride; } #endif } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; for (i = 0; i < numcols; ++i) { //lptr2[0] <<= 1; lptr2[0] = jpc_fix_asl(lptr2[0], 1); ++lptr2; } } #endif } } void jpc_ns_fwdlift_col(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_pluseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } /* Apply the scaling step. 
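	   (jpc_ns_fwdlift_col is the single-column specialisation; the
	   lptr2/hptr2 temporaries are redundant here but keep the code
	   textually parallel to the group variants above.)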
*/ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(LGAIN)); ++lptr2; lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(HGAIN)); ++hptr2; hptr += stride; } #endif } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; //lptr2[0] <<= 1; lptr2[0] = jpc_fix_asl(lptr2[0], 1); ++lptr2; } #endif } } void jpc_ns_invlift_row(jpc_fix_t *a, int numcols, int parity) { register jpc_fix_t *lptr; register jpc_fix_t *hptr; register int n; int llen; llen = (numcols + 1 - parity) >> 1; if (numcols > 1) { /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr[0] = jpc_fix_mul(lptr[0], jpc_dbltofix(1.0 / LGAIN)); ++lptr; } hptr = &a[llen]; n = numcols - llen; while (n-- > 0) { hptr[0] = jpc_fix_mul(hptr[0], jpc_dbltofix(1.0 / HGAIN)); ++hptr; } #endif /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr[0])); } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr[0])); } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (!parity) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); ++lptr; } n = llen - (!parity) - (parity != (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr[0], hptr[1]))); ++lptr; ++hptr; } if (parity != (numcols & 1)) { jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr[0])); } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen]; if (parity) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); ++hptr; } n = numcols - llen - parity - (parity == (numcols & 1)); while (n-- > 0) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr[0], lptr[1]))); ++hptr; ++lptr; } if (parity == (numcols & 1)) { jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr[0])); } } else { #if defined(WT_LENONE) if (parity) { lptr = &a[0]; //lptr[0] >>= 1; lptr[0] = jpc_fix_asr(lptr[0], 1); } #endif } } void jpc_ns_invlift_colgrp(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the scaling step. 
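	   (Synthesis first removes the gains with 1.0/LGAIN and 1.0/HGAIN,
	   then undoes the lifting passes in reverse order, DELTA through
	   ALPHA, using jpc_fix_minuseq.  Unlike the 5/3 case, the 9/7
	   transform is irreversible: rounding inside the fixed-point
	   jpc_fix_mul means reconstruction is only approximate.)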
*/ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(1.0 / LGAIN)); ++lptr2; } lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(1.0 / HGAIN)); ++hptr2; } hptr += stride; } #endif /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the third lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the fourth lifting step. 
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } } } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; for (i = 0; i < JPC_QMFB_COLGRPSIZE; ++i) { //lptr2[0] >>= 1; lptr2[0] = jpc_fix_asr(lptr2[0], 1); ++lptr2; } } #endif } } void jpc_ns_invlift_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; register int i; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; for (i = 0; i < numcols; ++i) { lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(1.0 / LGAIN)); ++lptr2; } lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; for (i = 0; i < numcols; ++i) { hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(1.0 / HGAIN)); ++hptr2; } hptr += stride; } #endif /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } } /* Apply the third lifting step. 
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; } lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; } hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; } hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; for (i = 0; i < numcols; ++i) { jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } } } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; for (i = 0; i < numcols; ++i) { //lptr2[0] >>= 1; lptr2[0] = jpc_fix_asr(lptr2[0], 1); ++lptr2; } } #endif } } void jpc_ns_invlift_col(jpc_fix_t *a, int numrows, int stride, int parity) { jpc_fix_t *lptr; jpc_fix_t *hptr; register jpc_fix_t *lptr2; register jpc_fix_t *hptr2; register int n; int llen; llen = (numrows + 1 - parity) >> 1; if (numrows > 1) { /* Apply the scaling step. */ #if defined(WT_DOSCALE) lptr = &a[0]; n = llen; while (n-- > 0) { lptr2 = lptr; lptr2[0] = jpc_fix_mul(lptr2[0], jpc_dbltofix(1.0 / LGAIN)); ++lptr2; lptr += stride; } hptr = &a[llen * stride]; n = numrows - llen; while (n-- > 0) { hptr2 = hptr; hptr2[0] = jpc_fix_mul(hptr2[0], jpc_dbltofix(1.0 / HGAIN)); ++hptr2; hptr += stride; } #endif /* Apply the first lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(DELTA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA), hptr2[0])); ++lptr2; ++hptr2; } /* Apply the second lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++hptr2; ++lptr2; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(GAMMA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA), lptr2[0])); ++lptr2; ++hptr2; } /* Apply the third lifting step. 
*/ lptr = &a[0]; hptr = &a[llen * stride]; if (!parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; lptr += stride; } n = llen - (!parity) - (parity != (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(BETA), jpc_fix_add(hptr2[0], hptr2[stride]))); ++lptr2; ++hptr2; lptr += stride; hptr += stride; } if (parity != (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(lptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA), hptr2[0])); ++lptr2; ++hptr2; } /* Apply the fourth lifting step. */ lptr = &a[0]; hptr = &a[llen * stride]; if (parity) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++hptr2; ++lptr2; hptr += stride; } n = numrows - llen - parity - (parity == (numrows & 1)); while (n-- > 0) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(ALPHA), jpc_fix_add(lptr2[0], lptr2[stride]))); ++lptr2; ++hptr2; hptr += stride; lptr += stride; } if (parity == (numrows & 1)) { lptr2 = lptr; hptr2 = hptr; jpc_fix_minuseq(hptr2[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA), lptr2[0])); ++lptr2; ++hptr2; } } else { #if defined(WT_LENONE) if (parity) { lptr2 = &a[0]; //lptr2[0] >>= 1; lptr2[0] = jpc_fix_asr(lptr2[0], 1); ++lptr2; } #endif } } int jpc_ns_analyze(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride) { int numrows = height; int numcols = width; int rowparity = ystart & 1; int colparity = xstart & 1; int i; jpc_fix_t *startptr; int maxcols; maxcols = (numcols / JPC_QMFB_COLGRPSIZE) * JPC_QMFB_COLGRPSIZE; startptr = &a[0]; for (i = 0; i < maxcols; i += JPC_QMFB_COLGRPSIZE) { jpc_qmfb_split_colgrp(startptr, numrows, stride, rowparity); jpc_ns_fwdlift_colgrp(startptr, numrows, stride, rowparity); startptr += JPC_QMFB_COLGRPSIZE; } if (maxcols < numcols) { jpc_qmfb_split_colres(startptr, numrows, numcols - maxcols, stride, rowparity); jpc_ns_fwdlift_colres(startptr, numrows, numcols - maxcols, stride, rowparity); } startptr = &a[0]; for (i = 0; i < numrows; ++i) { jpc_qmfb_split_row(startptr, numcols, colparity); jpc_ns_fwdlift_row(startptr, numcols, colparity); startptr += stride; } return 0; } int jpc_ns_synthesize(jpc_fix_t *a, int xstart, int ystart, int width, int height, int stride) { int numrows = height; int numcols = width; int rowparity = ystart & 1; int colparity = xstart & 1; int maxcols; jpc_fix_t *startptr; int i; startptr = &a[0]; for (i = 0; i < numrows; ++i) { jpc_ns_invlift_row(startptr, numcols, colparity); jpc_qmfb_join_row(startptr, numcols, colparity); startptr += stride; } maxcols = (numcols / JPC_QMFB_COLGRPSIZE) * JPC_QMFB_COLGRPSIZE; startptr = &a[0]; for (i = 0; i < maxcols; i += JPC_QMFB_COLGRPSIZE) { jpc_ns_invlift_colgrp(startptr, numrows, stride, rowparity); jpc_qmfb_join_colgrp(startptr, numrows, stride, rowparity); startptr += JPC_QMFB_COLGRPSIZE; } if (maxcols < numcols) { jpc_ns_invlift_colres(startptr, numrows, numcols - maxcols, stride, rowparity); jpc_qmfb_join_colres(startptr, numrows, numcols - maxcols, stride, rowparity); } return 0; }
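
/*
 * Usage sketch (not part of the original file): how a caller might drive the
 * transforms above on a tile held in a row-major jpc_fix_t buffer.  The
 * function name demo_roundtrip and the zero tile origin are illustrative
 * assumptions only.
 */
#if 0
static void demo_roundtrip(jpc_fix_t *tile, int width, int height, int stride)
{
	/* Forward transform in place; xstart/ystart carry the tile origin
	   so the lifting code can derive the row and column parities. */
	jpc_ns_analyze(tile, 0, 0, width, height, stride);

	/* Inverse transform.  For the 9/7 pair used here the round trip is
	   exact only up to fixed-point rounding; the 5/3 pair
	   (jpc_ft_analyze/jpc_ft_synthesize) reconstructs bit-exactly. */
	jpc_ns_synthesize(tile, 0, 0, width, height, stride);
}
#endif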
/* * CDXL video decoder * Copyright (c) 2011-2012 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Commodore CDXL video decoder * @author Paul B Mahol */ #define UNCHECKED_BITSTREAM_READER 1 #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" #include "get_bits.h" #include "internal.h" #define BIT_PLANAR 0x00 #define CHUNKY 0x20 #define BYTE_PLANAR 0x40 #define BIT_LINE 0x80 #define BYTE_LINE 0xC0 typedef struct CDXLVideoContext { AVCodecContext *avctx; int bpp; int format; int padded_bits; const uint8_t *palette; int palette_size; const uint8_t *video; int video_size; uint8_t *new_video; int new_video_size; } CDXLVideoContext; static av_cold int cdxl_decode_init(AVCodecContext *avctx) { CDXLVideoContext *c = avctx->priv_data; c->new_video_size = 0; c->avctx = avctx; return 0; } static void import_palette(CDXLVideoContext *c, uint32_t *new_palette) { int i; for (i = 0; i < c->palette_size / 2; i++) { unsigned rgb = AV_RB16(&c->palette[i * 2]); unsigned r = ((rgb >> 8) & 0xF) * 0x11; unsigned g = ((rgb >> 4) & 0xF) * 0x11; unsigned b = (rgb & 0xF) * 0x11; AV_WN32(&new_palette[i], (0xFFU << 24) | (r << 16) | (g << 8) | b); } } static void bitplanar2chunky(CDXLVideoContext *c, int linesize, uint8_t *out) { GetBitContext gb; int x, y, plane; if (init_get_bits8(&gb, c->video, c->video_size) < 0) return; for (plane = 0; plane < c->bpp; plane++) { for (y = 0; y < c->avctx->height; y++) { for (x = 0; x < c->avctx->width; x++) out[linesize * y + x] |= get_bits1(&gb) << plane; skip_bits(&gb, c->padded_bits); } } } static void bitline2chunky(CDXLVideoContext *c, int linesize, uint8_t *out) { GetBitContext gb; int x, y, plane; if (init_get_bits8(&gb, c->video, c->video_size) < 0) return; for (y = 0; y < c->avctx->height; y++) { for (plane = 0; plane < c->bpp; plane++) { for (x = 0; x < c->avctx->width; x++) out[linesize * y + x] |= get_bits1(&gb) << plane; skip_bits(&gb, c->padded_bits); } } } static void chunky2chunky(CDXLVideoContext *c, int linesize, uint8_t *out) { GetByteContext gb; int y; bytestream2_init(&gb, c->video, c->video_size); for (y = 0; y < c->avctx->height; y++) { bytestream2_get_buffer(&gb, out + linesize * y, c->avctx->width * 3); } } static void import_format(CDXLVideoContext *c, int linesize, uint8_t *out) { memset(out, 0, linesize * c->avctx->height); switch (c->format) { case BIT_PLANAR: bitplanar2chunky(c, linesize, out); break; case BIT_LINE: bitline2chunky(c, linesize, out); break; case CHUNKY: chunky2chunky(c, linesize, out); break; } } static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame) { uint32_t *new_palette = (uint32_t *)frame->data[1]; memset(frame->data[1], 0, AVPALETTE_SIZE); import_palette(c, new_palette); import_format(c, frame->linesize[0], frame->data[0]); } static void 
cdxl_decode_raw(CDXLVideoContext *c, AVFrame *frame)
{
    import_format(c, frame->linesize[0], frame->data[0]);
}

/* Hold-And-Modify decoding: op 0 reloads all channels from the palette,
 * ops 1-3 modify only blue, red or green and hold the other two channels
 * from the previous pixel. */
static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
{
    AVCodecContext *avctx = c->avctx;
    uint32_t new_palette[16], r, g, b;
    uint8_t *ptr, *out, index, op;
    int x, y;

    ptr = c->new_video;
    out = frame->data[0];

    import_palette(c, new_palette);
    import_format(c, avctx->width, c->new_video);

    for (y = 0; y < avctx->height; y++) {
        r = new_palette[0] & 0xFF0000;
        g = new_palette[0] & 0xFF00;
        b = new_palette[0] & 0xFF;
        for (x = 0; x < avctx->width; x++) {
            index  = *ptr++;
            op     = index >> 4;
            index &= 15;
            switch (op) {
            case 0:
                r = new_palette[index] & 0xFF0000;
                g = new_palette[index] & 0xFF00;
                b = new_palette[index] & 0xFF;
                break;
            case 1:
                b = index * 0x11;
                break;
            case 2:
                r = index * 0x11 << 16;
                break;
            case 3:
                g = index * 0x11 << 8;
                break;
            }
            AV_WL24(out + x * 3, r | g | b);
        }
        out += frame->linesize[0];
    }
}

/* Same scheme as HAM6, but with 6-bit indices and a 64-entry palette;
 * a modified channel keeps its two low bits from the held value. */
static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
{
    AVCodecContext *avctx = c->avctx;
    uint32_t new_palette[64], r, g, b;
    uint8_t *ptr, *out, index, op;
    int x, y;

    ptr = c->new_video;
    out = frame->data[0];

    import_palette(c, new_palette);
    import_format(c, avctx->width, c->new_video);

    for (y = 0; y < avctx->height; y++) {
        r = new_palette[0] & 0xFF0000;
        g = new_palette[0] & 0xFF00;
        b = new_palette[0] & 0xFF;
        for (x = 0; x < avctx->width; x++) {
            index  = *ptr++;
            op     = index >> 6;
            index &= 63;
            switch (op) {
            case 0:
                r = new_palette[index] & 0xFF0000;
                g = new_palette[index] & 0xFF00;
                b = new_palette[index] & 0xFF;
                break;
            case 1:
                b = (index << 2) | (b & 3);
                break;
            case 2:
                r = (index << 18) | (r & (3 << 16));
                break;
            case 3:
                g = (index << 10) | (g & (3 << 8));
                break;
            }
            AV_WL24(out + x * 3, r | g | b);
        }
        out += frame->linesize[0];
    }
}

static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *pkt)
{
    CDXLVideoContext *c = avctx->priv_data;
    AVFrame * const p = data;
    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
    const uint8_t *buf = pkt->data;

    if (buf_size < 32)
        return AVERROR_INVALIDDATA;
    encoding        = buf[1] & 7;
    c->format       = buf[1] & 0xE0;
    w               = AV_RB16(&buf[14]);
    h               = AV_RB16(&buf[16]);
    c->bpp          = buf[19];
    c->palette_size = AV_RB16(&buf[20]);
    c->palette      = buf + 32;
    c->video        = c->palette + c->palette_size;
    c->video_size   = buf_size - c->palette_size - 32;

    if (c->palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (buf_size < c->palette_size + 32)
        return AVERROR_INVALIDDATA;
    if (c->bpp < 1)
        return AVERROR_INVALIDDATA;
    if (c->format != BIT_PLANAR && c->format != BIT_LINE && c->format != CHUNKY) {
        avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
        return ret;

    if (c->format == CHUNKY)
        aligned_width = avctx->width;
    else
        aligned_width = FFALIGN(c->avctx->width, 16);
    c->padded_bits  = aligned_width - c->avctx->width;
    if (c->video_size < aligned_width * avctx->height * (int64_t)c->bpp / 8)
        return AVERROR_INVALIDDATA;
    if (!encoding && c->palette_size && c->bpp <= 8 && c->format != CHUNKY) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
        if (c->palette_size != (1 << (c->bpp - 1)))
            return AVERROR_INVALIDDATA;
        /* The HAM paths stage the image in c->new_video, which is sized
         * for one byte per pixel; a CHUNKY input would make
         * import_format() write three bytes per pixel into it and run
         * past the end of the buffer.  Reject the combination (a minimal
         * guard; upstream may structure the fix differently). */
        if (c->format == CHUNKY)
            return AVERROR_INVALIDDATA;
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
    } else if (!encoding && c->bpp == 24 && c->format == CHUNKY && !c->palette_size) {
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
    } else {
        avpriv_request_sample(avctx, "Encoding %d, bpp %d and format 0x%x",
                              encoding, c->bpp, c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret =
ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + AV_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { cdxl_decode_rgb(c, p); } else { cdxl_decode_raw(c, p); } *got_frame = 1; return buf_size; } static av_cold int cdxl_decode_end(AVCodecContext *avctx) { CDXLVideoContext *c = avctx->priv_data; av_freep(&c->new_video); return 0; } AVCodec ff_cdxl_decoder = { .name = "cdxl", .long_name = NULL_IF_CONFIG_SMALL("Commodore CDXL video"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_CDXL, .priv_data_size = sizeof(CDXLVideoContext), .init = cdxl_decode_init, .close = cdxl_decode_end, .decode = cdxl_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
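
/*
 * Worked HAM6 example (illustrative): for an index byte of 0x3A the decoder
 * above derives op = 0x3A >> 4 = 3 and index = 0x3A & 15 = 0xA, so only the
 * green channel is rewritten, g = (0xA * 0x11) << 8 = 0xAA00, while red and
 * blue are held over from the previous pixel.  This hold-and-modify scheme
 * is how 4 bits per pixel reach more colours than the 16-entry palette
 * alone allows.
 */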
/* * cdc-wdm.c * * This driver supports USB CDC WCM Device Management. * * Copyright (c) 2007-2009 Oliver Neukum * * Some code taken from cdc-acm.c * * Released under the GPLv2. * * Many thanks to Carl Nordbeck */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/poll.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <asm/byteorder.h> #include <asm/unaligned.h> #include <linux/usb/cdc-wdm.h> /* * Version Information */ #define DRIVER_VERSION "v0.03" #define DRIVER_AUTHOR "Oliver Neukum" #define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management" static const struct usb_device_id wdm_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_DMM }, { } }; MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_MINOR_BASE 176 #define WDM_IN_USE 1 #define WDM_DISCONNECTING 2 #define WDM_RESULT 3 #define WDM_READ 4 #define WDM_INT_STALL 5 #define WDM_POLL_RUNNING 6 #define WDM_RESPONDING 7 #define WDM_SUSPENDING 8 #define WDM_RESETTING 9 #define WDM_MAX 16 /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ #define WDM_DEFAULT_BUFSIZE 256 static DEFINE_MUTEX(wdm_mutex); static DEFINE_SPINLOCK(wdm_device_list_lock); static LIST_HEAD(wdm_device_list); /* --- method tables --- */ struct wdm_device { u8 *inbuf; /* buffer for response */ u8 *outbuf; /* buffer for command */ u8 *sbuf; /* buffer for status */ u8 *ubuf; /* buffer for copy to user space */ struct urb *command; struct urb *response; struct urb *validity; struct usb_interface *intf; struct usb_ctrlrequest *orq; struct usb_ctrlrequest *irq; spinlock_t iuspin; unsigned long flags; u16 bufsize; u16 wMaxCommand; u16 wMaxPacketSize; __le16 inum; int reslength; int length; int read; int count; dma_addr_t shandle; dma_addr_t ihandle; struct mutex wlock; struct mutex rlock; wait_queue_head_t wait; struct work_struct rxwork; int werr; int rerr; struct list_head device_list; int (*manage_power)(struct usb_interface *, int); }; static struct usb_driver wdm_driver; /* return intfdata if we own the interface, else look up intf in the list */ static struct wdm_device *wdm_find_device(struct usb_interface *intf) { struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf == intf) goto found; desc = NULL; found: spin_unlock(&wdm_device_list_lock); return desc; } static struct wdm_device *wdm_find_device_by_minor(int minor) { struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf->minor == minor) goto found; desc = NULL; found: spin_unlock(&wdm_device_list_lock); return desc; } /* --- callbacks --- */ static void wdm_out_callback(struct urb *urb) { struct wdm_device *desc; desc = urb->context; spin_lock(&desc->iuspin); desc->werr = urb->status; spin_unlock(&desc->iuspin); kfree(desc->outbuf); desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); wake_up(&desc->wait); } static void wdm_in_callback(struct urb *urb) { struct wdm_device *desc = urb->context; int status = urb->status; spin_lock(&desc->iuspin); clear_bit(WDM_RESPONDING, &desc->flags); if (status) { switch (status) { case -ENOENT: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ENOENT"); goto skip_error; case -ECONNRESET: 
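			/* like -ENOENT above and -ESHUTDOWN below: the URB
			   was unlinked or the device is gone, so the payload
			   is not copied, but skip_error still wakes any
			   waiting reader so it can re-check device state */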
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ECONNRESET");
			goto skip_error;
		case -ESHUTDOWN:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ESHUTDOWN");
			goto skip_error;
		case -EPIPE:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: -EPIPE\n");
			break;
		default:
			dev_err(&desc->intf->dev,
				"Unexpected error %d\n", status);
			break;
		}
	}

	desc->rerr = status;
	desc->reslength = urb->actual_length;
	/*
	 * Bound the copy: ubuf holds wMaxCommand bytes, but nothing stops a
	 * device from delivering further responses before userspace has
	 * drained the previous ones, so unchecked accumulation could write
	 * past the end of the buffer.  This is a minimal guard; the
	 * upstream fix tracks an explicit overflow condition instead.
	 */
	if (desc->length + desc->reslength <= desc->wMaxCommand) {
		memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
		desc->length += desc->reslength;
	} else {
		dev_err(&desc->intf->dev, "response too long, dropped\n");
	}
skip_error:
	wake_up(&desc->wait);
	set_bit(WDM_READ, &desc->flags);
	spin_unlock(&desc->iuspin);
}

static void wdm_int_callback(struct urb *urb)
{
	int rv = 0;
	int status = urb->status;
	struct wdm_device *desc;
	struct usb_cdc_notification *dr;

	desc = urb->context;
	dr = (struct usb_cdc_notification *)desc->sbuf;

	if (status) {
		switch (status) {
		case -ESHUTDOWN:
		case -ENOENT:
		case -ECONNRESET:
			return; /* unplug */
		case -EPIPE:
			set_bit(WDM_INT_STALL, &desc->flags);
			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
			goto sw; /* halt is cleared in work */
		default:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: %d\n", status);
			break;
		}
	}

	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
			urb->actual_length);
		goto exit;
	}

	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev_dbg(&desc->intf->dev,
			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
			dr->wIndex, dr->wLength);
		break;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&desc->intf->dev,
			"NOTIFY_NETWORK_CONNECTION %s network",
			dr->wValue ? "connected to" : "disconnected from");
		goto exit;
	default:
		clear_bit(WDM_POLL_RUNNING, &desc->flags);
		dev_err(&desc->intf->dev,
			"unknown notification %d received: index %d len %d\n",
			dr->bNotificationType, dr->wIndex, dr->wLength);
		goto exit;
	}

	spin_lock(&desc->iuspin);
	clear_bit(WDM_READ, &desc->flags);
	set_bit(WDM_RESPONDING, &desc->flags);
	if (!test_bit(WDM_DISCONNECTING, &desc->flags)
		&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
		dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
			__func__, rv);
	}
	spin_unlock(&desc->iuspin);
	if (rv < 0) {
		clear_bit(WDM_RESPONDING, &desc->flags);
		if (rv == -EPERM)
			return;
		if (rv == -ENOMEM) {
sw:
			rv = schedule_work(&desc->rxwork);
			if (rv)
				dev_err(&desc->intf->dev,
					"Cannot schedule work\n");
		}
	}
exit:
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(&desc->intf->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, rv);
}

static void kill_urbs(struct wdm_device *desc)
{
	/* the order here is essential */
	usb_kill_urb(desc->command);
	usb_kill_urb(desc->validity);
	usb_kill_urb(desc->response);
}

static void free_urbs(struct wdm_device *desc)
{
	usb_free_urb(desc->validity);
	usb_free_urb(desc->response);
	usb_free_urb(desc->command);
}

static void cleanup(struct wdm_device *desc)
{
	kfree(desc->sbuf);
	kfree(desc->inbuf);
	kfree(desc->orq);
	kfree(desc->irq);
	kfree(desc->ubuf);
	free_urbs(desc);
	kfree(desc);
}

static ssize_t wdm_write(struct file *file, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	u8 *buf;
	int rv = -EMSGSIZE, r, we;
	struct wdm_device *desc = file->private_data;
	struct usb_ctrlrequest *req;

	if (count > desc->wMaxCommand)
		count = desc->wMaxCommand;

	spin_lock_irq(&desc->iuspin);
	we = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);
	if (we < 0)
		return -EIO;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf) {
		rv = -ENOMEM;
		goto outnl;
	}

	r = copy_from_user(buf, buffer, count);
	if
(r > 0) { kfree(buf); rv = -EFAULT; goto outnl; } /* concurrent writes and disconnect */ r = mutex_lock_interruptible(&desc->wlock); rv = -ERESTARTSYS; if (r) { kfree(buf); goto outnl; } if (test_bit(WDM_DISCONNECTING, &desc->flags)) { kfree(buf); rv = -ENODEV; goto outnp; } r = usb_autopm_get_interface(desc->intf); if (r < 0) { kfree(buf); rv = usb_translate_errors(r); goto outnp; } if (!(file->f_flags & O_NONBLOCK)) r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); else if (test_bit(WDM_IN_USE, &desc->flags)) r = -EAGAIN; if (test_bit(WDM_RESETTING, &desc->flags)) r = -EIO; if (r < 0) { kfree(buf); rv = r; goto out; } req = desc->orq; usb_fill_control_urb( desc->command, interface_to_usbdev(desc->intf), /* using common endpoint 0 */ usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0), (unsigned char *)req, buf, count, wdm_out_callback, desc ); req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE); req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; req->wValue = 0; req->wIndex = desc->inum; req->wLength = cpu_to_le16(count); set_bit(WDM_IN_USE, &desc->flags); desc->outbuf = buf; rv = usb_submit_urb(desc->command, GFP_KERNEL); if (rv < 0) { kfree(buf); desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); rv = usb_translate_errors(rv); } else { dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", req->wIndex); } out: usb_autopm_put_interface(desc->intf); outnp: mutex_unlock(&desc->wlock); outnl: return rv < 0 ? rv : count; } static ssize_t wdm_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos) { int rv, cntr; int i = 0; struct wdm_device *desc = file->private_data; rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ if (rv < 0) return -ERESTARTSYS; cntr = ACCESS_ONCE(desc->length); if (cntr == 0) { desc->read = 0; retry: if (test_bit(WDM_DISCONNECTING, &desc->flags)) { rv = -ENODEV; goto err; } i++; if (file->f_flags & O_NONBLOCK) { if (!test_bit(WDM_READ, &desc->flags)) { rv = cntr ? 
cntr : -EAGAIN; goto err; } rv = 0; } else { rv = wait_event_interruptible(desc->wait, test_bit(WDM_READ, &desc->flags)); } /* may have happened while we slept */ if (test_bit(WDM_DISCONNECTING, &desc->flags)) { rv = -ENODEV; goto err; } if (test_bit(WDM_RESETTING, &desc->flags)) { rv = -EIO; goto err; } usb_mark_last_busy(interface_to_usbdev(desc->intf)); if (rv < 0) { rv = -ERESTARTSYS; goto err; } spin_lock_irq(&desc->iuspin); if (desc->rerr) { /* read completed, error happened */ desc->rerr = 0; spin_unlock_irq(&desc->iuspin); rv = -EIO; goto err; } /* * recheck whether we've lost the race * against the completion handler */ if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */ spin_unlock_irq(&desc->iuspin); goto retry; } if (!desc->reslength) { /* zero length read */ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); clear_bit(WDM_READ, &desc->flags); spin_unlock_irq(&desc->iuspin); goto retry; } cntr = desc->length; spin_unlock_irq(&desc->iuspin); } if (cntr > count) cntr = count; rv = copy_to_user(buffer, desc->ubuf, cntr); if (rv > 0) { rv = -EFAULT; goto err; } spin_lock_irq(&desc->iuspin); for (i = 0; i < desc->length - cntr; i++) desc->ubuf[i] = desc->ubuf[i + cntr]; desc->length -= cntr; /* in case we had outstanding data */ if (!desc->length) clear_bit(WDM_READ, &desc->flags); spin_unlock_irq(&desc->iuspin); rv = cntr; err: mutex_unlock(&desc->rlock); return rv; } static int wdm_flush(struct file *file, fl_owner_t id) { struct wdm_device *desc = file->private_data; wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); /* cannot dereference desc->intf if WDM_DISCONNECTING */ if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) dev_err(&desc->intf->dev, "Error in flush path: %d\n", desc->werr); return usb_translate_errors(desc->werr); } static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait) { struct wdm_device *desc = file->private_data; unsigned long flags; unsigned int mask = 0; spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { mask = POLLHUP | POLLERR; spin_unlock_irqrestore(&desc->iuspin, flags); goto desc_out; } if (test_bit(WDM_READ, &desc->flags)) mask = POLLIN | POLLRDNORM; if (desc->rerr || desc->werr) mask |= POLLERR; if (!test_bit(WDM_IN_USE, &desc->flags)) mask |= POLLOUT | POLLWRNORM; spin_unlock_irqrestore(&desc->iuspin, flags); poll_wait(file, &desc->wait, wait); desc_out: return mask; } static int wdm_open(struct inode *inode, struct file *file) { int minor = iminor(inode); int rv = -ENODEV; struct usb_interface *intf; struct wdm_device *desc; mutex_lock(&wdm_mutex); desc = wdm_find_device_by_minor(minor); if (!desc) goto out; intf = desc->intf; if (test_bit(WDM_DISCONNECTING, &desc->flags)) goto out; file->private_data = desc; rv = usb_autopm_get_interface(desc->intf); if (rv < 0) { dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); goto out; } /* using write lock to protect desc->count */ mutex_lock(&desc->wlock); if (!desc->count++) { desc->werr = 0; desc->rerr = 0; rv = usb_submit_urb(desc->validity, GFP_KERNEL); if (rv < 0) { desc->count--; dev_err(&desc->intf->dev, "Error submitting int urb - %d\n", rv); rv = usb_translate_errors(rv); } } else { rv = 0; } mutex_unlock(&desc->wlock); if (desc->count == 1) desc->manage_power(intf, 1); usb_autopm_put_interface(desc->intf); out: mutex_unlock(&wdm_mutex); return rv; } static int wdm_release(struct inode *inode, struct file *file) { struct wdm_device *desc = file->private_data; 
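
	/* Last close either just kills the URBs and drops the power
	   management reference (device still present) or, after a
	   disconnect, tears down the whole wdm_device via cleanup(). */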
mutex_lock(&wdm_mutex); /* using write lock to protect desc->count */ mutex_lock(&desc->wlock); desc->count--; mutex_unlock(&desc->wlock); if (!desc->count) { if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); kill_urbs(desc); desc->manage_power(desc->intf, 0); } else { /* must avoid dev_printk here as desc->intf is invalid */ pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__); cleanup(desc); } } mutex_unlock(&wdm_mutex); return 0; } static const struct file_operations wdm_fops = { .owner = THIS_MODULE, .read = wdm_read, .write = wdm_write, .open = wdm_open, .flush = wdm_flush, .release = wdm_release, .poll = wdm_poll, .llseek = noop_llseek, }; static struct usb_class_driver wdm_class = { .name = "cdc-wdm%d", .fops = &wdm_fops, .minor_base = WDM_MINOR_BASE, }; /* --- error handling --- */ static void wdm_rxwork(struct work_struct *work) { struct wdm_device *desc = container_of(work, struct wdm_device, rxwork); unsigned long flags; int rv; spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { spin_unlock_irqrestore(&desc->iuspin, flags); } else { spin_unlock_irqrestore(&desc->iuspin, flags); rv = usb_submit_urb(desc->response, GFP_KERNEL); if (rv < 0 && rv != -EPERM) { spin_lock_irqsave(&desc->iuspin, flags); if (!test_bit(WDM_DISCONNECTING, &desc->flags)) schedule_work(&desc->rxwork); spin_unlock_irqrestore(&desc->iuspin, flags); } } } /* --- hotplug --- */ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, u16 bufsize, int (*manage_power)(struct usb_interface *, int)) { int rv = -ENOMEM; struct wdm_device *desc; desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); if (!desc) goto out; INIT_LIST_HEAD(&desc->device_list); mutex_init(&desc->rlock); mutex_init(&desc->wlock); spin_lock_init(&desc->iuspin); init_waitqueue_head(&desc->wait); desc->wMaxCommand = bufsize; /* this will be expanded and needed in hardware endianness */ desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber); desc->intf = intf; INIT_WORK(&desc->rxwork, wdm_rxwork); rv = -EINVAL; if (!usb_endpoint_is_int_in(ep)) goto err; desc->wMaxPacketSize = usb_endpoint_maxp(ep); desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!desc->orq) goto err; desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!desc->irq) goto err; desc->validity = usb_alloc_urb(0, GFP_KERNEL); if (!desc->validity) goto err; desc->response = usb_alloc_urb(0, GFP_KERNEL); if (!desc->response) goto err; desc->command = usb_alloc_urb(0, GFP_KERNEL); if (!desc->command) goto err; desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); if (!desc->ubuf) goto err; desc->sbuf = kmalloc(desc->wMaxPacketSize, GFP_KERNEL); if (!desc->sbuf) goto err; desc->inbuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); if (!desc->inbuf) goto err; usb_fill_int_urb( desc->validity, interface_to_usbdev(intf), usb_rcvintpipe(interface_to_usbdev(intf), ep->bEndpointAddress), desc->sbuf, desc->wMaxPacketSize, wdm_int_callback, desc, ep->bInterval ); desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; desc->irq->wValue = 0; desc->irq->wIndex = desc->inum; desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); usb_fill_control_urb( desc->response, interface_to_usbdev(intf), /* using common endpoint 0 */ usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0), (unsigned char *)desc->irq, desc->inbuf, desc->wMaxCommand, 
wdm_in_callback, desc ); desc->manage_power = manage_power; spin_lock(&wdm_device_list_lock); list_add(&desc->device_list, &wdm_device_list); spin_unlock(&wdm_device_list_lock); rv = usb_register_dev(intf, &wdm_class); if (rv < 0) goto err; else dev_info(&intf->dev, "%s: USB WDM device\n", dev_name(intf->usb_dev)); out: return rv; err: spin_lock(&wdm_device_list_lock); list_del(&desc->device_list); spin_unlock(&wdm_device_list_lock); cleanup(desc); return rv; } static int wdm_manage_power(struct usb_interface *intf, int on) { /* need autopm_get/put here to ensure the usbcore sees the new value */ int rv = usb_autopm_get_interface(intf); if (rv < 0) goto err; intf->needs_remote_wakeup = on; usb_autopm_put_interface(intf); err: return rv; } static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) { int rv = -EINVAL; struct usb_host_interface *iface; struct usb_endpoint_descriptor *ep; struct usb_cdc_dmm_desc *dmhd; u8 *buffer = intf->altsetting->extra; int buflen = intf->altsetting->extralen; u16 maxcom = WDM_DEFAULT_BUFSIZE; if (!buffer) goto err; while (buflen > 2) { if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_HEADER_TYPE: break; case USB_CDC_DMM_TYPE: dmhd = (struct usb_cdc_dmm_desc *)buffer; maxcom = le16_to_cpu(dmhd->wMaxCommand); dev_dbg(&intf->dev, "Finding maximum buffer length: %d", maxcom); break; default: dev_err(&intf->dev, "Ignoring extra header, type %d, length %d\n", buffer[2], buffer[0]); break; } next_desc: buflen -= buffer[0]; buffer += buffer[0]; } iface = intf->cur_altsetting; if (iface->desc.bNumEndpoints != 1) goto err; ep = &iface->endpoint[0].desc; rv = wdm_create(intf, ep, maxcom, &wdm_manage_power); err: return rv; } /** * usb_cdc_wdm_register - register a WDM subdriver * @intf: usb interface the subdriver will associate with * @ep: interrupt endpoint to monitor for notifications * @bufsize: maximum message size to support for read/write * * Create WDM usb class character device and associate it with intf * without binding, allowing another driver to manage the interface. * * The subdriver will manage the given interrupt endpoint exclusively * and will issue control requests referring to the given intf. It * will otherwise avoid interferring, and in particular not do * usb_set_intfdata/usb_get_intfdata on intf. * * The return value is a pointer to the subdriver's struct usb_driver. * The registering driver is responsible for calling this subdriver's * disconnect, suspend, resume, pre_reset and post_reset methods from * its own. 
*/ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, int bufsize, int (*manage_power)(struct usb_interface *, int)) { int rv = -EINVAL; rv = wdm_create(intf, ep, bufsize, manage_power); if (rv < 0) goto err; return &wdm_driver; err: return ERR_PTR(rv); } EXPORT_SYMBOL(usb_cdc_wdm_register); static void wdm_disconnect(struct usb_interface *intf) { struct wdm_device *desc; unsigned long flags; usb_deregister_dev(intf, &wdm_class); desc = wdm_find_device(intf); mutex_lock(&wdm_mutex); /* the spinlock makes sure no new urbs are generated in the callbacks */ spin_lock_irqsave(&desc->iuspin, flags); set_bit(WDM_DISCONNECTING, &desc->flags); set_bit(WDM_READ, &desc->flags); /* to terminate pending flushes */ clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); wake_up_all(&desc->wait); mutex_lock(&desc->rlock); mutex_lock(&desc->wlock); kill_urbs(desc); cancel_work_sync(&desc->rxwork); mutex_unlock(&desc->wlock); mutex_unlock(&desc->rlock); /* the desc->intf pointer used as list key is now invalid */ spin_lock(&wdm_device_list_lock); list_del(&desc->device_list); spin_unlock(&wdm_device_list_lock); if (!desc->count) cleanup(desc); else dev_dbg(&intf->dev, "%s: %d open files - postponing cleanup\n", __func__, desc->count); mutex_unlock(&wdm_mutex); } #ifdef CONFIG_PM static int wdm_suspend(struct usb_interface *intf, pm_message_t message) { struct wdm_device *desc = wdm_find_device(intf); int rv = 0; dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); /* if this is an autosuspend the caller does the locking */ if (!PMSG_IS_AUTO(message)) { mutex_lock(&desc->rlock); mutex_lock(&desc->wlock); } spin_lock_irq(&desc->iuspin); if (PMSG_IS_AUTO(message) && (test_bit(WDM_IN_USE, &desc->flags) || test_bit(WDM_RESPONDING, &desc->flags))) { spin_unlock_irq(&desc->iuspin); rv = -EBUSY; } else { set_bit(WDM_SUSPENDING, &desc->flags); spin_unlock_irq(&desc->iuspin); /* callback submits work - order is essential */ kill_urbs(desc); cancel_work_sync(&desc->rxwork); } if (!PMSG_IS_AUTO(message)) { mutex_unlock(&desc->wlock); mutex_unlock(&desc->rlock); } return rv; } #endif static int recover_from_urb_loss(struct wdm_device *desc) { int rv = 0; if (desc->count) { rv = usb_submit_urb(desc->validity, GFP_NOIO); if (rv < 0) dev_err(&desc->intf->dev, "Error resume submitting int urb - %d\n", rv); } return rv; } #ifdef CONFIG_PM static int wdm_resume(struct usb_interface *intf) { struct wdm_device *desc = wdm_find_device(intf); int rv; dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); clear_bit(WDM_SUSPENDING, &desc->flags); rv = recover_from_urb_loss(desc); return rv; } #endif static int wdm_pre_reset(struct usb_interface *intf) { struct wdm_device *desc = wdm_find_device(intf); /* * we notify everybody using poll of * an exceptional situation * must be done before recovery lest a spontaneous * message from the device is lost */ spin_lock_irq(&desc->iuspin); set_bit(WDM_RESETTING, &desc->flags); /* inform read/write */ set_bit(WDM_READ, &desc->flags); /* unblock read */ clear_bit(WDM_IN_USE, &desc->flags); /* unblock write */ desc->rerr = -EINTR; spin_unlock_irq(&desc->iuspin); wake_up_all(&desc->wait); mutex_lock(&desc->rlock); mutex_lock(&desc->wlock); kill_urbs(desc); cancel_work_sync(&desc->rxwork); return 0; } static int wdm_post_reset(struct usb_interface *intf) { struct wdm_device *desc = wdm_find_device(intf); int rv; clear_bit(WDM_RESETTING, &desc->flags); rv = recover_from_urb_loss(desc); 
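	/* rlock and wlock were taken in wdm_pre_reset() and are released here */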
mutex_unlock(&desc->wlock); mutex_unlock(&desc->rlock); return 0; } static struct usb_driver wdm_driver = { .name = "cdc_wdm", .probe = wdm_probe, .disconnect = wdm_disconnect, #ifdef CONFIG_PM .suspend = wdm_suspend, .resume = wdm_resume, .reset_resume = wdm_resume, #endif .pre_reset = wdm_pre_reset, .post_reset = wdm_post_reset, .id_table = wdm_ids, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(wdm_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
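/*
 * The probe path above walks the interface's class-specific descriptors by
 * stepping buffer/buflen with each descriptor's bLength (buffer[0]) and
 * dispatching on the subtype byte (buffer[2]). The standalone sketch below
 * models just that walk for illustration; the CDC_WALK_DEMO guard and the
 * sample descriptor bytes are hypothetical and not part of this driver, and
 * the sketch adds a defensive bLength check that the driver's loop itself
 * does not perform.
 */
#ifdef CDC_WALK_DEMO
#include <stdio.h>

static void walk_cdc_extra(const unsigned char *buf, int len)
{
	/* each descriptor: [0]=bLength, [1]=bDescriptorType, [2]=bDescriptorSubType */
	while (len > 2) {
		if (buf[0] < 3)	/* a 0-, 1- or 2-byte descriptor would never advance */
			break;
		printf("type 0x%02x subtype 0x%02x length %d\n",
		       buf[1], buf[2], buf[0]);
		len -= buf[0];
		buf += buf[0];
	}
}

int main(void)
{
	/* hypothetical extra descriptors: a 5-byte header + a 7-byte DMM entry */
	static const unsigned char extra[] = {
		0x05, 0x24, 0x00, 0x10, 0x01,			/* CS_INTERFACE, HEADER */
		0x07, 0x24, 0x14, 0x10, 0x01, 0x00, 0x02,	/* CS_INTERFACE, DMM */
	};

	walk_cdc_extra(extra, sizeof(extra));
	return 0;
}
#endif /* CDC_WALK_DEMO */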
/* ./CrossVul/dataset_final_sorted/CWE-119/c/bad_5606_0 */

/* crossvul-cpp_data_bad_5579_0 */
/* net_packet.c -- Handles in- and outgoing VPN packets Copyright (C) 1998-2005 Ivo Timmermans, 2000-2013 Guus Sliepen <guus@tinc-vpn.org> 2010 Timothy Redaelli <timothy@redaelli.eu> 2010 Brandon Black <blblack@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "system.h" #include <openssl/rand.h> #include <openssl/err.h> #include <openssl/evp.h> #include <openssl/pem.h> #include <openssl/hmac.h> #ifdef HAVE_ZLIB #include <zlib.h> #endif #ifdef HAVE_LZO #include LZO1X_H #endif #include "avl_tree.h" #include "conf.h" #include "connection.h" #include "device.h" #include "ethernet.h" #include "event.h" #include "graph.h" #include "logger.h" #include "net.h" #include "netutl.h" #include "protocol.h" #include "process.h" #include "route.h" #include "utils.h" #include "xalloc.h" int keylifetime = 0; int keyexpires = 0; #ifdef HAVE_LZO static char lzo_wrkmem[LZO1X_999_MEM_COMPRESS > LZO1X_1_MEM_COMPRESS ? LZO1X_999_MEM_COMPRESS : LZO1X_1_MEM_COMPRESS]; #endif static void send_udppacket(node_t *, vpn_packet_t *); unsigned replaywin = 16; bool localdiscovery = false; #define MAX_SEQNO 1073741824 /* mtuprobes == 1..30: initial discovery, send bursts with 1 second interval mtuprobes == 31: sleep pinginterval seconds mtuprobes == 32: send 1 burst, sleep pingtimeout second mtuprobes == 33: no response from other side, restart PMTU discovery process Probes are sent in batches of at least three, with random sizes between the lower and upper boundaries for the MTU thus far discovered. After the initial discovery, a fourth packet is added to each batch with a size larger than the currently known PMTU, to test if the PMTU has increased. In case local discovery is enabled, another packet is added to each batch, which will be broadcast to the local network. 
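
  For example, with a node whose PMTU has been narrowed down to
  minmtu == 1000 and maxmtu == 1500, a batch contains three probes of
  random length between 1001 and 1500 bytes; after the initial discovery,
  and as long as maxmtu + 8 is still below MTU, one extra probe of
  maxmtu + 8 == 1508 bytes is sent first to detect a PMTU increase.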
*/ void send_mtu_probe(node_t *n) { vpn_packet_t packet; int len, i; int timeout = 1; n->mtuprobes++; n->mtuevent = NULL; if(!n->status.reachable || !n->status.validkey) { ifdebug(TRAFFIC) logger(LOG_INFO, "Trying to send MTU probe to unreachable or rekeying node %s (%s)", n->name, n->hostname); n->mtuprobes = 0; return; } if(n->mtuprobes > 32) { if(!n->minmtu) { n->mtuprobes = 31; timeout = pinginterval; goto end; } ifdebug(TRAFFIC) logger(LOG_INFO, "%s (%s) did not respond to UDP ping, restarting PMTU discovery", n->name, n->hostname); n->mtuprobes = 1; n->minmtu = 0; n->maxmtu = MTU; } if(n->mtuprobes >= 10 && n->mtuprobes < 32 && !n->minmtu) { ifdebug(TRAFFIC) logger(LOG_INFO, "No response to MTU probes from %s (%s)", n->name, n->hostname); n->mtuprobes = 31; } if(n->mtuprobes == 30 || (n->mtuprobes < 30 && n->minmtu >= n->maxmtu)) { if(n->minmtu > n->maxmtu) n->minmtu = n->maxmtu; else n->maxmtu = n->minmtu; n->mtu = n->minmtu; ifdebug(TRAFFIC) logger(LOG_INFO, "Fixing MTU of %s (%s) to %d after %d probes", n->name, n->hostname, n->mtu, n->mtuprobes); n->mtuprobes = 31; } if(n->mtuprobes == 31) { timeout = pinginterval; goto end; } else if(n->mtuprobes == 32) { timeout = pingtimeout; } for(i = 0; i < 4 + localdiscovery; i++) { if(i == 0) { if(n->mtuprobes < 30 || n->maxmtu + 8 >= MTU) continue; len = n->maxmtu + 8; } else if(n->maxmtu <= n->minmtu) { len = n->maxmtu; } else { len = n->minmtu + 1 + rand() % (n->maxmtu - n->minmtu); } if(len < 64) len = 64; memset(packet.data, 0, 14); RAND_pseudo_bytes(packet.data + 14, len - 14); packet.len = len; if(i >= 4 && n->mtuprobes <= 10) packet.priority = -1; else packet.priority = 0; ifdebug(TRAFFIC) logger(LOG_INFO, "Sending MTU probe length %d to %s (%s)", len, n->name, n->hostname); send_udppacket(n, &packet); } end: n->mtuevent = new_event(); n->mtuevent->handler = (event_handler_t)send_mtu_probe; n->mtuevent->data = n; n->mtuevent->time = now + timeout; event_add(n->mtuevent); } void mtu_probe_h(node_t *n, vpn_packet_t *packet, length_t len) { ifdebug(TRAFFIC) logger(LOG_INFO, "Got MTU probe length %d from %s (%s)", packet->len, n->name, n->hostname); if(!packet->data[0]) { packet->data[0] = 1; send_udppacket(n, packet); } else { if(n->mtuprobes > 30) { if (len == n->maxmtu + 8) { ifdebug(TRAFFIC) logger(LOG_INFO, "Increase in PMTU to %s (%s) detected, restarting PMTU discovery", n->name, n->hostname); n->maxmtu = MTU; n->mtuprobes = 10; return; } if(n->minmtu) n->mtuprobes = 30; else n->mtuprobes = 1; } if(len > n->maxmtu) len = n->maxmtu; if(n->minmtu < len) n->minmtu = len; } } static length_t compress_packet(uint8_t *dest, const uint8_t *source, length_t len, int level) { if(level == 0) { memcpy(dest, source, len); return len; } else if(level == 10) { #ifdef HAVE_LZO lzo_uint lzolen = MAXSIZE; lzo1x_1_compress(source, len, dest, &lzolen, lzo_wrkmem); return lzolen; #else return -1; #endif } else if(level < 10) { #ifdef HAVE_ZLIB unsigned long destlen = MAXSIZE; if(compress2(dest, &destlen, source, len, level) == Z_OK) return destlen; else #endif return -1; } else { #ifdef HAVE_LZO lzo_uint lzolen = MAXSIZE; lzo1x_999_compress(source, len, dest, &lzolen, lzo_wrkmem); return lzolen; #else return -1; #endif } return -1; } static length_t uncompress_packet(uint8_t *dest, const uint8_t *source, length_t len, int level) { if(level == 0) { memcpy(dest, source, len); return len; } else if(level > 9) { #ifdef HAVE_LZO lzo_uint lzolen = MAXSIZE; if(lzo1x_decompress_safe(source, len, dest, &lzolen, NULL) == LZO_E_OK) return lzolen; else #endif 
return -1; } #ifdef HAVE_ZLIB else { unsigned long destlen = MAXSIZE; if(uncompress(dest, &destlen, source, len) == Z_OK) return destlen; else return -1; } #endif return -1; } /* VPN packet I/O */ static void receive_packet(node_t *n, vpn_packet_t *packet) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Received packet of %d bytes from %s (%s)", packet->len, n->name, n->hostname); route(n, packet); } static bool try_mac(const node_t *n, const vpn_packet_t *inpkt) { unsigned char hmac[EVP_MAX_MD_SIZE]; if(!n->indigest || !n->inmaclength || !n->inkey || inpkt->len < sizeof inpkt->seqno + n->inmaclength) return false; HMAC(n->indigest, n->inkey, n->inkeylength, (unsigned char *) &inpkt->seqno, inpkt->len - n->inmaclength, (unsigned char *)hmac, NULL); return !memcmp(hmac, (char *) &inpkt->seqno + inpkt->len - n->inmaclength, n->inmaclength); } static void receive_udppacket(node_t *n, vpn_packet_t *inpkt) { vpn_packet_t pkt1, pkt2; vpn_packet_t *pkt[] = { &pkt1, &pkt2, &pkt1, &pkt2 }; int nextpkt = 0; vpn_packet_t *outpkt = pkt[0]; int outlen, outpad; unsigned char hmac[EVP_MAX_MD_SIZE]; int i; if(!n->inkey) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Got packet from %s (%s) but he hasn't got our key yet", n->name, n->hostname); return; } /* Check packet length */ if(inpkt->len < sizeof(inpkt->seqno) + n->inmaclength) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Got too short packet from %s (%s)", n->name, n->hostname); return; } /* Check the message authentication code */ if(n->indigest && n->inmaclength) { inpkt->len -= n->inmaclength; HMAC(n->indigest, n->inkey, n->inkeylength, (unsigned char *) &inpkt->seqno, inpkt->len, (unsigned char *)hmac, NULL); if(memcmp(hmac, (char *) &inpkt->seqno + inpkt->len, n->inmaclength)) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Got unauthenticated packet from %s (%s)", n->name, n->hostname); return; } } /* Decrypt the packet */ if(n->incipher) { outpkt = pkt[nextpkt++]; if(!EVP_DecryptInit_ex(&n->inctx, NULL, NULL, NULL, NULL) || !EVP_DecryptUpdate(&n->inctx, (unsigned char *) &outpkt->seqno, &outlen, (unsigned char *) &inpkt->seqno, inpkt->len) || !EVP_DecryptFinal_ex(&n->inctx, (unsigned char *) &outpkt->seqno + outlen, &outpad)) { ifdebug(TRAFFIC) logger(LOG_DEBUG, "Error decrypting packet from %s (%s): %s", n->name, n->hostname, ERR_error_string(ERR_get_error(), NULL)); return; } outpkt->len = outlen + outpad; inpkt = outpkt; } /* Check the sequence number */ inpkt->len -= sizeof(inpkt->seqno); inpkt->seqno = ntohl(inpkt->seqno); if(replaywin) { if(inpkt->seqno != n->received_seqno + 1) { if(inpkt->seqno >= n->received_seqno + replaywin * 8) { if(n->farfuture++ < replaywin >> 2) { logger(LOG_WARNING, "Packet from %s (%s) is %d seqs in the future, dropped (%u)", n->name, n->hostname, inpkt->seqno - n->received_seqno - 1, n->farfuture); return; } logger(LOG_WARNING, "Lost %d packets from %s (%s)", inpkt->seqno - n->received_seqno - 1, n->name, n->hostname); memset(n->late, 0, replaywin); } else if (inpkt->seqno <= n->received_seqno) { if((n->received_seqno >= replaywin * 8 && inpkt->seqno <= n->received_seqno - replaywin * 8) || !(n->late[(inpkt->seqno / 8) % replaywin] & (1 << inpkt->seqno % 8))) { logger(LOG_WARNING, "Got late or replayed packet from %s (%s), seqno %d, last received %d", n->name, n->hostname, inpkt->seqno, n->received_seqno); return; } } else { for(i = n->received_seqno + 1; i < inpkt->seqno; i++) n->late[(i / 8) % replaywin] |= 1 << i % 8; } } n->farfuture = 0; n->late[(inpkt->seqno / 8) % replaywin] &= ~(1 << inpkt->seqno % 8); } if(inpkt->seqno > 
n->received_seqno) n->received_seqno = inpkt->seqno; if(n->received_seqno > MAX_SEQNO) keyexpires = 0; /* Decompress the packet */ length_t origlen = inpkt->len; if(n->incompression) { outpkt = pkt[nextpkt++]; if((outpkt->len = uncompress_packet(outpkt->data, inpkt->data, inpkt->len, n->incompression)) < 0) { ifdebug(TRAFFIC) logger(LOG_ERR, "Error while uncompressing packet from %s (%s)", n->name, n->hostname); return; } inpkt = outpkt; origlen -= MTU/64 + 20; } inpkt->priority = 0; if(!inpkt->data[12] && !inpkt->data[13]) mtu_probe_h(n, inpkt, origlen); else receive_packet(n, inpkt); } void receive_tcppacket(connection_t *c, const char *buffer, int len) { vpn_packet_t outpkt; outpkt.len = len; if(c->options & OPTION_TCPONLY) outpkt.priority = 0; else outpkt.priority = -1; memcpy(outpkt.data, buffer, len); receive_packet(c->node, &outpkt); } static void send_udppacket(node_t *n, vpn_packet_t *origpkt) { vpn_packet_t pkt1, pkt2; vpn_packet_t *pkt[] = { &pkt1, &pkt2, &pkt1, &pkt2 }; vpn_packet_t *inpkt = origpkt; int nextpkt = 0; vpn_packet_t *outpkt; int origlen; int outlen, outpad; #if defined(SOL_IP) && defined(IP_TOS) static int priority = 0; #endif int origpriority; if(!n->status.reachable) { ifdebug(TRAFFIC) logger(LOG_INFO, "Trying to send UDP packet to unreachable node %s (%s)", n->name, n->hostname); return; } /* Make sure we have a valid key */ if(!n->status.validkey) { ifdebug(TRAFFIC) logger(LOG_INFO, "No valid key known yet for %s (%s), forwarding via TCP", n->name, n->hostname); if(n->last_req_key + 10 <= now) { send_req_key(n); n->last_req_key = now; } send_tcppacket(n->nexthop->connection, origpkt); return; } if(n->options & OPTION_PMTU_DISCOVERY && inpkt->len > n->minmtu && (inpkt->data[12] | inpkt->data[13])) { ifdebug(TRAFFIC) logger(LOG_INFO, "Packet for %s (%s) larger than minimum MTU, forwarding via %s", n->name, n->hostname, n != n->nexthop ? 
n->nexthop->name : "TCP"); if(n != n->nexthop) send_packet(n->nexthop, origpkt); else send_tcppacket(n->nexthop->connection, origpkt); return; } origlen = inpkt->len; origpriority = inpkt->priority; /* Compress the packet */ if(n->outcompression) { outpkt = pkt[nextpkt++]; if((outpkt->len = compress_packet(outpkt->data, inpkt->data, inpkt->len, n->outcompression)) < 0) { ifdebug(TRAFFIC) logger(LOG_ERR, "Error while compressing packet to %s (%s)", n->name, n->hostname); return; } inpkt = outpkt; } /* Add sequence number */ inpkt->seqno = htonl(++(n->sent_seqno)); inpkt->len += sizeof(inpkt->seqno); /* Encrypt the packet */ if(n->outcipher) { outpkt = pkt[nextpkt++]; if(!EVP_EncryptInit_ex(&n->outctx, NULL, NULL, NULL, NULL) || !EVP_EncryptUpdate(&n->outctx, (unsigned char *) &outpkt->seqno, &outlen, (unsigned char *) &inpkt->seqno, inpkt->len) || !EVP_EncryptFinal_ex(&n->outctx, (unsigned char *) &outpkt->seqno + outlen, &outpad)) { ifdebug(TRAFFIC) logger(LOG_ERR, "Error while encrypting packet to %s (%s): %s", n->name, n->hostname, ERR_error_string(ERR_get_error(), NULL)); goto end; } outpkt->len = outlen + outpad; inpkt = outpkt; } /* Add the message authentication code */ if(n->outdigest && n->outmaclength) { HMAC(n->outdigest, n->outkey, n->outkeylength, (unsigned char *) &inpkt->seqno, inpkt->len, (unsigned char *) &inpkt->seqno + inpkt->len, NULL); inpkt->len += n->outmaclength; } /* Determine which socket we have to use */ if(n->address.sa.sa_family != listen_socket[n->sock].sa.sa.sa_family) { for(int sock = 0; sock < listen_sockets; sock++) { if(n->address.sa.sa_family == listen_socket[sock].sa.sa.sa_family) { n->sock = sock; break; } } } /* Send the packet */ struct sockaddr *sa; socklen_t sl; int sock; sockaddr_t broadcast; /* Overloaded use of priority field: -1 means local broadcast */ if(origpriority == -1 && n->prevedge) { sock = rand() % listen_sockets; memset(&broadcast, 0, sizeof broadcast); if(listen_socket[sock].sa.sa.sa_family == AF_INET6) { broadcast.in6.sin6_family = AF_INET6; broadcast.in6.sin6_addr.s6_addr[0x0] = 0xff; broadcast.in6.sin6_addr.s6_addr[0x1] = 0x02; broadcast.in6.sin6_addr.s6_addr[0xf] = 0x01; broadcast.in6.sin6_port = n->prevedge->address.in.sin_port; broadcast.in6.sin6_scope_id = listen_socket[sock].sa.in6.sin6_scope_id; } else { broadcast.in.sin_family = AF_INET; broadcast.in.sin_addr.s_addr = -1; broadcast.in.sin_port = n->prevedge->address.in.sin_port; } sa = &broadcast.sa; sl = SALEN(broadcast.sa); } else { if(origpriority == -1) origpriority = 0; sa = &(n->address.sa); sl = SALEN(n->address.sa); sock = n->sock; } #if defined(SOL_IP) && defined(IP_TOS) if(priorityinheritance && origpriority != priority && listen_socket[n->sock].sa.sa.sa_family == AF_INET) { priority = origpriority; ifdebug(TRAFFIC) logger(LOG_DEBUG, "Setting outgoing packet priority to %d", priority); if(setsockopt(listen_socket[n->sock].udp, SOL_IP, IP_TOS, &priority, sizeof(priority))) /* SO_PRIORITY doesn't seem to work */ logger(LOG_ERR, "System call `%s' failed: %s", "setsockopt", strerror(errno)); } #endif if(sendto(listen_socket[sock].udp, (char *) &inpkt->seqno, inpkt->len, 0, sa, sl) < 0 && !sockwouldblock(sockerrno)) { if(sockmsgsize(sockerrno)) { if(n->maxmtu >= origlen) n->maxmtu = origlen - 1; if(n->mtu >= origlen) n->mtu = origlen - 1; } else ifdebug(TRAFFIC) logger(LOG_WARNING, "Error sending packet to %s (%s): %s", n->name, n->hostname, sockstrerror(sockerrno)); } end: origpkt->len = origlen; } /* send a packet to the given vpn ip. 
*/
void send_packet(const node_t *n, vpn_packet_t *packet) {
	node_t *via;

	if(n == myself) {
		if(overwrite_mac)
			memcpy(packet->data, mymac.x, ETH_ALEN);
		devops.write(packet);
		return;
	}

	ifdebug(TRAFFIC) logger(LOG_ERR, "Sending packet of %d bytes to %s (%s)",
			   packet->len, n->name, n->hostname);

	if(!n->status.reachable) {
		ifdebug(TRAFFIC) logger(LOG_INFO, "Node %s (%s) is not reachable",
				   n->name, n->hostname);
		return;
	}

	via = (packet->priority == -1 || n->via == myself) ? n->nexthop : n->via;

	if(via != n)
		ifdebug(TRAFFIC) logger(LOG_INFO, "Sending packet to %s via %s (%s)",
			   n->name, via->name, n->via->hostname);

	if(packet->priority == -1 || ((myself->options | via->options) & OPTION_TCPONLY)) {
		if(!send_tcppacket(via->connection, packet))
			terminate_connection(via->connection, true);
	} else
		send_udppacket(via, packet);
}

/* Broadcast a packet using the minimum spanning tree */

void broadcast_packet(const node_t *from, vpn_packet_t *packet) {
	avl_node_t *node;
	connection_t *c;
	node_t *n;

	// Always give ourself a copy of the packet.
	if(from != myself)
		send_packet(myself, packet);

	// In TunnelServer mode, do not forward broadcast packets.
	// The MST might not be valid and create loops.
	if(tunnelserver || broadcast_mode == BMODE_NONE)
		return;

	ifdebug(TRAFFIC) logger(LOG_INFO, "Broadcasting packet of %d bytes from %s (%s)",
			   packet->len, from->name, from->hostname);

	switch(broadcast_mode) {
		// In MST mode, broadcast packets travel via the Minimum Spanning Tree.
		// This guarantees all nodes receive the broadcast packet, and
		// usually distributes the sending of broadcast packets over all nodes.
		case BMODE_MST:
			for(node = connection_tree->head; node; node = node->next) {
				c = node->data;

				if(c->status.active && c->status.mst && c != from->nexthop->connection)
					send_packet(c->node, packet);
			}
			break;

		// In direct mode, we send copies to each node we know of.
		// However, this only reaches nodes that can be reached in a single hop.
		// We don't have enough information to forward broadcast packets in this case.
		case BMODE_DIRECT:
			if(from != myself)
				break;

			for(node = node_udp_tree->head; node; node = node->next) {
				n = node->data;

				if(n->status.reachable && ((n->via == myself && n->nexthop == n) || n->via == n))
					send_packet(n, packet);
			}
			break;

		default:
			break;
	}
}

static node_t *try_harder(const sockaddr_t *from, const vpn_packet_t *pkt) {
	avl_node_t *node;
	edge_t *e;
	node_t *n = NULL;
	bool hard = false;
	static time_t last_hard_try = 0;

	for(node = edge_weight_tree->head; node; node = node->next) {
		e = node->data;

		if(e->to == myself)
			continue;

		if(sockaddrcmp_noport(from, &e->address)) {
			if(last_hard_try == now)
				continue;
			hard = true;
		}

		if(!try_mac(e->to, pkt))
			continue;

		n = e->to;
		break;
	}

	if(hard)
		last_hard_try = now;

	return n;
}

void handle_incoming_vpn_data(int sock) {
	vpn_packet_t pkt;
	char *hostname;
	sockaddr_t from;
	socklen_t fromlen = sizeof(from);
	node_t *n;

	pkt.len = recvfrom(listen_socket[sock].udp, (char *) &pkt.seqno, MAXSIZE, 0, &from.sa, &fromlen);

	if(pkt.len < 0) {
		if(!sockwouldblock(sockerrno))
			logger(LOG_ERR, "Receiving packet failed: %s", sockstrerror(sockerrno));
		return;
	}

	sockaddrunmap(&from); /* Some braindead IPv6 implementations do stupid things. */

	n = lookup_node_udp(&from);

	if(!n) {
		n = try_harder(&from, &pkt);
		if(n)
			update_node_udp(n, &from);
		else ifdebug(PROTOCOL) {
			hostname = sockaddr2hostname(&from);
			logger(LOG_WARNING, "Received UDP packet from unknown source %s", hostname);
			free(hostname);
			return;
		}
		else
			return;
	}

	n->sock = sock;

	receive_udppacket(n, &pkt);
}
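/*
 * The replay protection in receive_udppacket() keeps a bitmap covering
 * replaywin * 8 sequence numbers around the highest sequence number seen
 * so far. The standalone sketch below models just that bitmap logic for
 * illustration; the REPLAY_DEMO guard and the sample sequence are
 * hypothetical and not part of tinc, and the far-future clamp is omitted.
 */
#ifdef REPLAY_DEMO
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_REPLAYWIN 16	/* bytes, i.e. a window of 128 sequence numbers */

static uint8_t late[DEMO_REPLAYWIN];
static uint32_t received_seqno;

/* returns true if seqno is acceptable, false if late-and-seen or replayed */
static bool check_seqno(uint32_t seqno)
{
	uint32_t i;

	if (seqno <= received_seqno) {
		/* behind the highest seqno: accept only if still marked late */
		if ((received_seqno >= DEMO_REPLAYWIN * 8 &&
		     seqno <= received_seqno - DEMO_REPLAYWIN * 8) ||
		    !(late[(seqno / 8) % DEMO_REPLAYWIN] & (1 << seqno % 8)))
			return false;
	} else {
		/* jumped ahead: mark the skipped sequence numbers as late */
		for (i = received_seqno + 1; i < seqno; i++)
			late[(i / 8) % DEMO_REPLAYWIN] |= 1 << i % 8;
	}

	late[(seqno / 8) % DEMO_REPLAYWIN] &= ~(1 << seqno % 8);
	if (seqno > received_seqno)
		received_seqno = seqno;
	return true;
}

int main(void)
{
	/* 3 arrives late once (accepted), then is replayed (dropped) */
	uint32_t samples[] = { 1, 2, 5, 3, 3, 4 };

	for (unsigned j = 0; j < sizeof(samples) / sizeof(samples[0]); j++)
		printf("seqno %u: %s\n", (unsigned) samples[j],
		       check_seqno(samples[j]) ? "accepted" : "dropped");
	return 0;
}
#endif /* REPLAY_DEMO */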
/* ./CrossVul/dataset_final_sorted/CWE-119/c/bad_5579_0 */

/* crossvul-cpp_data_good_552_2 */
/*
 *  Copyright (C) 1997-2005 Jon Nelson <jnelson@boa.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 1, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/* $Id: index_dir.c,v 1.32.2.7 2005/02/22 03:00:24 jnelson Exp $*/

#include "config.h"

#include <stdio.h>
#include <sys/stat.h>
#include <limits.h>             /* for PATH_MAX */
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include "compat.h"

#define MAX_FILE_LENGTH MAXNAMLEN
#define MAX_PATH_LENGTH PATH_MAX

#define INT_TO_HEX(x) \
    ((((x)-10)>=0)?('A'+((x)-10)):('0'+(x)))

#include "escape.h"

char *html_escape_string(const char *inp, char *dest, const unsigned int len);
char *http_escape_string(const char *inp, char *buf, const unsigned int len);
int select_files(const struct dirent *d);
int index_directory(char *dir, char *title);
void send_error(int error);

/*
 * Name: html_escape_string
 */
char *html_escape_string(const char *inp, char *dest, const unsigned int len)
{
    int max;
    char *buf;
    unsigned char c;

    max = len * 6;

    if (dest == NULL && max)
        dest = malloc(sizeof (unsigned char) * (max + 1));

    if (dest == NULL)
        return NULL;

    buf = dest;
    while ((c = *inp++)) {
        switch (c) {
        case '>':
            *dest++ = '&';
            *dest++ = 'g';
            *dest++ = 't';
            *dest++ = ';';
            break;
        case '<':
            *dest++ = '&';
            *dest++ = 'l';
            *dest++ = 't';
            *dest++ = ';';
            break;
        case '&':
            *dest++ = '&';
            *dest++ = 'a';
            *dest++ = 'm';
            *dest++ = 'p';
            *dest++ = ';';
            break;
        case '\"':
            *dest++ = '&';
            *dest++ = 'q';
            *dest++ = 'u';
            *dest++ = 'o';
            *dest++ = 't';
            *dest++ = ';';
            break;
        default:
            *dest++ = c;
        }
    }
    *dest = '\0';
    return buf;
}

/*
 * Name: escape_string
 *
 * Description: escapes the string inp. Uses variable buf. If buf is
 *  NULL when the program starts, it will attempt to dynamically allocate
 *  the space that it needs, otherwise it will assume that the user
 *  has already allocated enough space for the variable buf, which
 *  could be up to 3 times the size of inp. If the routine dynamically
 *  allocates the space, the user is responsible for freeing it afterwards
 * Returns: NULL on error, pointer to string otherwise.
 */
char *http_escape_string(const char *inp, char *buf, const unsigned int len)
{
    int max;
    char *index_c;
    unsigned char c;
    int found_a_colon = 0;

    max = len * 3;

    if (buf == NULL && max)
        buf = malloc(sizeof (unsigned char) * (max + 1));

    if (buf == NULL)
        return NULL;

    index_c = buf;
    while ((c = *inp++)) {
        if (c == ':' && !found_a_colon && index_c > buf) {
            found_a_colon = 1;
            memmove(buf + 2, buf, (index_c - buf));
            *buf = '.';
            *(buf + 1) = '/';
            index_c += 2;
            *index_c++ = ':';
        } else if (needs_escape((unsigned int) c) || c == '?') {
            *index_c++ = '%';
            *index_c++ = INT_TO_HEX((c >> 4) & 0xf);
            *index_c++ = INT_TO_HEX(c & 0xf);
        } else
            *index_c++ = c;
    }
    *index_c = '\0';

    return buf;
}

void send_error(int error)
{
    const char *the_error;

    switch (error) {
    case 1:
        the_error = "Not enough arguments were passed to the indexer.";
        break;
    case 2:
        the_error = "The Directory Sorter ran out of Memory";
        break;
    case 3:
        the_error = "There was a problem changing to the appropriate directory.";
        break;
    case 4:
        the_error = "There was an error escaping a string.";
        break;
    case 5:
        the_error = "Too many arguments were passed to the indexer.";
        break;
    case 6:
        the_error = "No files in this directory.";
        break;
    default:
        the_error = "An unknown error occurred producing the directory.";
        break;
    }

    printf("<html>\n<head>\n<title>\n%s\n</title>\n"
           "<body>\n%s\n</body>\n</html>\n", the_error, the_error);
}

int select_files(const struct dirent *dirbuf)
{
    if (dirbuf->d_name[0] == '.')
        return 0;
    else
        return 1;
}

/*
 * Name: index_directory
 * Description: Called from get_dir_mapping if a directory html
 * has to be generated on the fly
 * If no_slash is true, prepend slashes to hrefs
 * returns -1 for problem, else 0
 */
int index_directory(char *dir, char *title)
{
    struct dirent *dirbuf;
    int numdir;
    struct dirent **array;
    struct stat statbuf;
    char http_filename[MAX_FILE_LENGTH * 3];
    char html_filename[MAX_FILE_LENGTH * 6];
    char escaped_filename[MAX_FILE_LENGTH * 18]; /* *both* http and html escape */
    int i;

    if (chdir(dir) == -1) {
        send_error(3);
        return -1;
    }
    numdir = scandir(".", &array, select_files, alphasort);
    if (numdir == -1) {
        send_error(2);
        return -1;
    } else if (numdir == -2) {
        send_error(6);
        return -1;
    }

    if (html_escape_string(title, html_filename, strlen(title)) == NULL) {
        send_error(4);
        return -1;
    }

    printf("<html>\n"
           "<head>\n<title>Index of %s</title>\n</head>\n\n"
           "<body bgcolor=\"#ffffff\">\n"
           "<H2>Index of %s</H2>\n"
           "<table>\n%s",
           html_filename, html_filename,
           (strcmp(title, "/") == 0 ? "" :
            "<tr><td colspan=3><h3>Directories</h3></td></tr>"
            "<tr><td colspan=3><a href=\"../\">Parent Directory</a></td></tr>\n"));

    for (i = 0; i < numdir; ++i) {
        dirbuf = array[i];

        if (stat(dirbuf->d_name, &statbuf) == -1)
            continue;

        if (!S_ISDIR(statbuf.st_mode))
            continue;

        if (html_escape_string(dirbuf->d_name, html_filename, NAMLEN(dirbuf)) == NULL) {
            send_error(4);
            return -1;
        }
        if (http_escape_string(dirbuf->d_name, http_filename, NAMLEN(dirbuf)) == NULL) {
            send_error(4);
            return -1;
        }
        if (html_escape_string(http_filename, escaped_filename, strlen(http_filename)) == NULL) {
            send_error(4);
            return -1;
        }

        printf("<tr>"
               "<td width=\"40%%\"><a href=\"%s/\">%s/</a></td>"
               "<td align=right>%s</td>"
               "<td align=right>" PRINTF_OFF_T_ARG " bytes</td>"
               "</tr>\n",
               escaped_filename, html_filename,
               ctime(&statbuf.st_mtime), (off_t) statbuf.st_size);
    }

    printf("<tr><td colspan=3>&nbsp;</td></tr>\n"
           "<tr><td colspan=3><h3>Files</h3></td></tr>\n");

    for (i = 0; i < numdir; ++i) {
        int len;

        dirbuf = array[i];

        if (stat(dirbuf->d_name, &statbuf) == -1)
            continue;

        if (S_ISDIR(statbuf.st_mode))
            continue;

        if (html_escape_string(dirbuf->d_name, html_filename, NAMLEN(dirbuf)) == NULL) {
            send_error(4);
            return -1;
        }
        if (http_escape_string(dirbuf->d_name, http_filename, NAMLEN(dirbuf)) == NULL) {
            send_error(4);
            return -1;
        }

        len = strlen(http_filename);
#ifdef GUNZIP
        if (len > 3 && !memcmp(http_filename + len - 3, ".gz", 3)) {
            http_filename[len - 3] = '\0';
            html_filename[strlen(html_filename) - 3] = '\0';

            if (html_escape_string(http_filename, escaped_filename, strlen(http_filename)) == NULL) {
                send_error(4);
                return -1;
            }

            printf("<tr>"
                   "<td width=\"40%%\"><a href=\"%s\">%s</a> "
                   "<a href=\"%s.gz\">(.gz)</a></td>"
                   "<td align=right>%s</td>"
                   "<td align=right>" PRINTF_OFF_T_ARG " bytes</td>"
                   "</tr>\n",
                   escaped_filename, html_filename, http_filename,
                   ctime(&statbuf.st_mtime), (off_t) statbuf.st_size);
        } else {
#endif
            if (html_escape_string(http_filename, escaped_filename, strlen(http_filename)) == NULL) {
                send_error(4);
                return -1;
            }

            printf("<tr>"
                   "<td width=\"40%%\"><a href=\"%s\">%s</a></td>"
                   "<td align=right>%s</td>"
                   "<td align=right>" PRINTF_OFF_T_ARG " bytes</td>"
                   "</tr>\n",
                   escaped_filename, html_filename,
                   ctime(&statbuf.st_mtime), (off_t) statbuf.st_size);
#ifdef GUNZIP
        }
#endif
    }

    /* hey -- even though this is a one-shot deal, we should
     * still free memory we ought to free
     * You never know -- this code might get used elsewhere!
     */
    for (i = 0; i < numdir; ++i) {
        free(array[i]);
        array[i] = NULL;
    }
    free(array);
    array = NULL;

    return 0;                   /* success */
}

int main(int argc, char *argv[])
{
    time_t timep;
    struct tm *timeptr;
    char *now;

    if (argc < 3) {
        send_error(1);
        return -1;
    } else if (argc > 3) {
        send_error(5);
        return -1;
    }

    build_needs_escape();

    if (argv[2] == NULL)
        index_directory(argv[1], argv[1]);
    else
        index_directory(argv[1], argv[2]);

    time(&timep);
#ifdef USE_LOCALTIME
    timeptr = localtime(&timep);
#else
    timeptr = gmtime(&timep);
#endif

    now = strdup(asctime(timeptr));
    if (!now) {
        return -1;
    }
    now[strlen(now) - 1] = '\0';

#ifdef USE_LOCALTIME
    printf("</table>\n<hr noshade>\nIndex generated %s %s\n"
           "<!-- This program is part of the Boa Webserver Copyright (C) 1991-2002 http://www.boa.org -->\n"
           "</body>\n</html>\n", now, TIMEZONE(timeptr));
#else
    printf("</table>\n<hr noshade>\nIndex generated %s UTC\n"
           "<!-- This program is part of the Boa Webserver Copyright (C) 1991-2002 http://www.boa.org -->\n"
           "</body>\n</html>\n", now);
#endif
    free(now);
    return 0;
}
/* ./CrossVul/dataset_final_sorted/CWE-119/c/good_552_2 */

/* crossvul-cpp_data_bad_4409_0 */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release().
When a reference type passes into the release function, * the verifier also releases the reference. If any unchecked or unreleased * reference remains at the end of the program, the verifier rejects it. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifer state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; /* length of verifier log at the time this state was pushed on stack */ u32 log_pos; }; #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 #define BPF_COMPLEXITY_LIMIT_STATES 64 #define BPF_MAP_KEY_POISON (1ULL << 63) #define BPF_MAP_KEY_SEEN (1ULL << 62) #define BPF_MAP_PTR_UNPRIV 1UL #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ POISON_POINTER_DELTA)) #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; } static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) { return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; } static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, const struct bpf_map *map, bool unpriv) { BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); unpriv |= bpf_map_ptr_unpriv(aux); aux->map_ptr_state = (unsigned long)map | (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); } static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux) { return aux->map_key_state & BPF_MAP_KEY_POISON; } static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux) { return !(aux->map_key_state & BPF_MAP_KEY_SEEN); } static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux) { return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); } static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state) { bool poisoned = bpf_map_key_poisoned(aux); aux->map_key_state = state | BPF_MAP_KEY_SEEN | (poisoned ? 
BPF_MAP_KEY_POISON : 0ULL); } struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; int mem_size; u64 msize_max_value; int ref_obj_id; int func_id; u32 btf_id; }; struct btf *btf_vmlinux; static DEFINE_MUTEX(bpf_verifier_lock); static const struct bpf_line_info * find_linfo(const struct bpf_verifier_env *env, u32 insn_off) { const struct bpf_line_info *linfo; const struct bpf_prog *prog; u32 i, nr_linfo; prog = env->prog; nr_linfo = prog->aux->nr_linfo; if (!nr_linfo || insn_off >= prog->len) return NULL; linfo = prog->aux->linfo; for (i = 1; i < nr_linfo; i++) if (insn_off < linfo[i].insn_off) break; return &linfo[i - 1]; } void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args) { unsigned int n; n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (log->level == BPF_LOG_KERNEL) { pr_err("BPF:%s\n", log->kbuf); return; } if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos) { char zero = 0; if (!bpf_verifier_log_needed(log)) return; log->len_used = new_pos; if (put_user(zero, log->ubuf + new_pos)) log->ubuf = NULL; } /* log_level controls verbosity level of eBPF verifier. * bpf_verifier_log_write() is used to dump the verification trace to the log, * so the user can figure out what's wrong with the program */ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...) { va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } EXPORT_SYMBOL_GPL(bpf_verifier_log_write); __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) { struct bpf_verifier_env *env = private_data; va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } __printf(2, 3) void bpf_log(struct bpf_verifier_log *log, const char *fmt, ...) { va_list args; if (!bpf_verifier_log_needed(log)) return; va_start(args, fmt); bpf_verifier_vlog(log, fmt, args); va_end(args); } static const char *ltrim(const char *s) { while (isspace(*s)) s++; return s; } __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, u32 insn_off, const char *prefix_fmt, ...) 
{ const struct bpf_line_info *linfo; if (!bpf_verifier_log_needed(&env->log)) return; linfo = find_linfo(env, insn_off); if (!linfo || linfo == env->prev_linfo) return; if (prefix_fmt) { va_list args; va_start(args, prefix_fmt); bpf_verifier_vlog(&env->log, prefix_fmt, args); va_end(args); } verbose(env, "%s\n", ltrim(btf_name_by_offset(env->prog->aux->btf, linfo->line_off))); env->prev_linfo = linfo; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } static bool type_is_sk_pointer(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_SOCK_COMMON || type == PTR_TO_TCP_SOCK || type == PTR_TO_XDP_SOCK; } static bool reg_type_not_null(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_TCP_SOCK || type == PTR_TO_MAP_VALUE || type == PTR_TO_SOCK_COMMON; } static bool reg_type_may_be_null(enum bpf_reg_type type) { return type == PTR_TO_MAP_VALUE_OR_NULL || type == PTR_TO_SOCKET_OR_NULL || type == PTR_TO_SOCK_COMMON_OR_NULL || type == PTR_TO_TCP_SOCK_OR_NULL || type == PTR_TO_BTF_ID_OR_NULL || type == PTR_TO_MEM_OR_NULL || type == PTR_TO_RDONLY_BUF_OR_NULL || type == PTR_TO_RDWR_BUF_OR_NULL; } static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) { return reg->type == PTR_TO_MAP_VALUE && map_value_has_spin_lock(reg->map_ptr); } static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) { return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL || type == PTR_TO_TCP_SOCK || type == PTR_TO_TCP_SOCK_OR_NULL || type == PTR_TO_MEM || type == PTR_TO_MEM_OR_NULL; } static bool arg_type_may_be_refcounted(enum bpf_arg_type type) { return type == ARG_PTR_TO_SOCK_COMMON; } /* Determine whether the function releases some resources allocated by another * function call. The first reference type argument will be assumed to be * released by release_reference(). */ static bool is_release_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_release || func_id == BPF_FUNC_ringbuf_submit || func_id == BPF_FUNC_ringbuf_discard; } static bool may_be_acquire_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_lookup_tcp || func_id == BPF_FUNC_sk_lookup_udp || func_id == BPF_FUNC_skc_lookup_tcp || func_id == BPF_FUNC_map_lookup_elem || func_id == BPF_FUNC_ringbuf_reserve; } static bool is_acquire_function(enum bpf_func_id func_id, const struct bpf_map *map) { enum bpf_map_type map_type = map ? 
map->map_type : BPF_MAP_TYPE_UNSPEC; if (func_id == BPF_FUNC_sk_lookup_tcp || func_id == BPF_FUNC_sk_lookup_udp || func_id == BPF_FUNC_skc_lookup_tcp || func_id == BPF_FUNC_ringbuf_reserve) return true; if (func_id == BPF_FUNC_map_lookup_elem && (map_type == BPF_MAP_TYPE_SOCKMAP || map_type == BPF_MAP_TYPE_SOCKHASH)) return true; return false; } static bool is_ptr_cast_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_tcp_sock || func_id == BPF_FUNC_sk_fullsock; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", [PTR_TO_FLOW_KEYS] = "flow_keys", [PTR_TO_SOCKET] = "sock", [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", [PTR_TO_SOCK_COMMON] = "sock_common", [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", [PTR_TO_TCP_SOCK] = "tcp_sock", [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", [PTR_TO_TP_BUFFER] = "tp_buffer", [PTR_TO_XDP_SOCK] = "xdp_sock", [PTR_TO_BTF_ID] = "ptr_", [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", [PTR_TO_MEM] = "mem", [PTR_TO_MEM_OR_NULL] = "mem_or_null", [PTR_TO_RDONLY_BUF] = "rdonly_buf", [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", [PTR_TO_RDWR_BUF] = "rdwr_buf", [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", }; static char slot_type_char[] = { [STACK_INVALID] = '?', [STACK_SPILL] = 'r', [STACK_MISC] = 'm', [STACK_ZERO] = '0', }; static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) verbose(env, "_"); if (live & REG_LIVE_READ) verbose(env, "r"); if (live & REG_LIVE_WRITTEN) verbose(env, "w"); if (live & REG_LIVE_DONE) verbose(env, "D"); } static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[reg->frameno]; } const char *kernel_type_name(u32 id) { return btf_name_by_offset(btf_vmlinux, btf_type_by_id(btf_vmlinux, id)->name_off); } static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { const struct bpf_reg_state *reg; enum bpf_reg_type t; int i; if (state->frameno) verbose(env, " frame%d:", state->frameno); for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d", i); print_liveness(env, reg->live); verbose(env, "=%s", reg_type_str[t]); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL) verbose(env, "%s", kernel_type_name(reg->btf_id)); verbose(env, "(id=%d", reg->id); if (reg_type_may_be_refcounted_or_null(t)) verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is 
too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } if (reg->s32_min_value != reg->smin_value && reg->s32_min_value != S32_MIN) verbose(env, ",s32_min_value=%d", (int)(reg->s32_min_value)); if (reg->s32_max_value != reg->smax_value && reg->s32_max_value != S32_MAX) verbose(env, ",s32_max_value=%d", (int)(reg->s32_max_value)); if (reg->u32_min_value != reg->umin_value && reg->u32_min_value != U32_MIN) verbose(env, ",u32_min_value=%d", (int)(reg->u32_min_value)); if (reg->u32_max_value != reg->umax_value && reg->u32_max_value != U32_MAX) verbose(env, ",u32_max_value=%d", (int)(reg->u32_max_value)); } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { char types_buf[BPF_REG_SIZE + 1]; bool valid = false; int j; for (j = 0; j < BPF_REG_SIZE; j++) { if (state->stack[i].slot_type[j] != STACK_INVALID) valid = true; types_buf[j] = slot_type_char[ state->stack[i].slot_type[j]]; } types_buf[BPF_REG_SIZE] = 0; if (!valid) continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); if (state->stack[i].slot_type[0] == STACK_SPILL) { reg = &state->stack[i].spilled_ptr; t = reg->type; verbose(env, "=%s", reg_type_str[t]); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "=%s", types_buf); } } if (state->acquired_refs && state->refs[0].id) { verbose(env, " refs=%d", state->refs[0].id); for (i = 1; i < state->acquired_refs; i++) if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } verbose(env, "\n"); } #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int copy_##NAME##_state(struct bpf_func_state *dst, \ const struct bpf_func_state *src) \ { \ if (!src->FIELD) \ return 0; \ if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ /* internal bug, make state invalid to reject the program */ \ memset(dst, 0, sizeof(*dst)); \ return -EFAULT; \ } \ memcpy(dst->FIELD, src->FIELD, \ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ return 0; \ } /* copy_reference_state() */ COPY_STATE_FN(reference, acquired_refs, refs, 1) /* copy_stack_state() */ COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef COPY_STATE_FN #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ bool copy_old) \ { \ u32 old_size = state->COUNT; \ struct bpf_##NAME##_state *new_##FIELD; \ int slot = size / SIZE; \ \ if (size <= old_size || !size) { \ if (copy_old) \ return 0; \ state->COUNT = slot * SIZE; \ if (!size && old_size) { \ kfree(state->FIELD); \ state->FIELD = NULL; \ } \ return 0; \ } \ new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ GFP_KERNEL); \ if (!new_##FIELD) \ return -ENOMEM; \ if (copy_old) { \ if (state->FIELD) \ memcpy(new_##FIELD, state->FIELD, \ 
sizeof(*new_##FIELD) * (old_size / SIZE)); \ memset(new_##FIELD + old_size / SIZE, 0, \ sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ } \ state->COUNT = slot * SIZE; \ kfree(state->FIELD); \ state->FIELD = new_##FIELD; \ return 0; \ } /* realloc_reference_state() */ REALLOC_STATE_FN(reference, acquired_refs, refs, 1) /* realloc_stack_state() */ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef REALLOC_STATE_FN /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which realloc_stack_state() copies over. It points to previous * bpf_verifier_state which is never reallocated. */ static int realloc_func_state(struct bpf_func_state *state, int stack_size, int refs_size, bool copy_old) { int err = realloc_reference_state(state, refs_size, copy_old); if (err) return err; return realloc_stack_state(state, stack_size, copy_old); } /* Acquire a pointer id from the env and update the state->refs to include * this new pointer reference. * On success, returns a valid pointer id to associate with the register * On failure, returns a negative errno. */ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) { struct bpf_func_state *state = cur_func(env); int new_ofs = state->acquired_refs; int id, err; err = realloc_reference_state(state, state->acquired_refs + 1, true); if (err) return err; id = ++env->id_gen; state->refs[new_ofs].id = id; state->refs[new_ofs].insn_idx = insn_idx; return id; } /* release function corresponding to acquire_reference_state(). Idempotent. */ static int release_reference_state(struct bpf_func_state *state, int ptr_id) { int i, last_idx; last_idx = state->acquired_refs - 1; for (i = 0; i < state->acquired_refs; i++) { if (state->refs[i].id == ptr_id) { if (last_idx && i != last_idx) memcpy(&state->refs[i], &state->refs[last_idx], sizeof(*state->refs)); memset(&state->refs[last_idx], 0, sizeof(*state->refs)); state->acquired_refs--; return 0; } } return -EINVAL; } static int transfer_reference_state(struct bpf_func_state *dst, struct bpf_func_state *src) { int err = realloc_reference_state(dst, src->acquired_refs, false); if (err) return err; err = copy_reference_state(dst, src); if (err) return err; return 0; } static void free_func_state(struct bpf_func_state *state) { if (!state) return; kfree(state->refs); kfree(state->stack); kfree(state); } static void clear_jmp_history(struct bpf_verifier_state *state) { kfree(state->jmp_history); state->jmp_history = NULL; state->jmp_history_cnt = 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { int i; for (i = 0; i <= state->curframe; i++) { free_func_state(state->frame[i]); state->frame[i] = NULL; } clear_jmp_history(state); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_func_state(struct bpf_func_state *dst, const struct bpf_func_state *src) { int err; err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); err = copy_reference_state(dst, src); if (err) return err; return copy_stack_state(dst, src); } static int copy_verifier_state(struct bpf_verifier_state *dst_state, const struct 
bpf_verifier_state *src) { struct bpf_func_state *dst; u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; int i, err; if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { kfree(dst_state->jmp_history); dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); if (!dst_state->jmp_history) return -ENOMEM; } memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); dst_state->jmp_history_cnt = src->jmp_history_cnt; /* if dst has more stack frames than src, free them */ for (i = src->curframe + 1; i <= dst_state->curframe; i++) { free_func_state(dst_state->frame[i]); dst_state->frame[i] = NULL; } dst_state->speculative = src->speculative; dst_state->curframe = src->curframe; dst_state->active_spin_lock = src->active_spin_lock; dst_state->branches = src->branches; dst_state->parent = src->parent; dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) return -ENOMEM; dst_state->frame[i] = dst; } err = copy_func_state(dst, src->frame[i]); if (err) return err; } return 0; } static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { while (st) { u32 br = --st->branches; /* WARN_ON(br > 1) technically makes sense here, * but see comment in push_stack(), hence: */ WARN_ONCE((int)br < 0, "BUG update_branch_counts:branches_to_explore=%d\n", br); if (br) break; st = st->parent; } } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx, bool pop_log) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (pop_log) bpf_vlog_reset(&env->log, head->log_pos); if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx, bool speculative) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; elem->log_pos = env->log.len_used; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; elem->st.speculative |= speculative; if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { verbose(env, "The sequence of %d jumps is too complex.\n", env->stack_size); goto err; } if (elem->st.parent) { ++elem->st.parent->branches; /* WARN_ON(branches > 2) technically makes sense here, * but * 1. speculative states will bump 'branches' for non-branch * instructions * 2. is_state_visited() heuristics may decide not to create * a new state for a sequence of branches and all such current * and cloned states will be pointing to a single parent state * which might have large 'branches' count.
*/ } return &elem->st; err: free_verifier_state(env->cur_state, true); env->cur_state = NULL; /* pop all elements and return */ while (!pop_stack(env, NULL, NULL, false)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { /* Clear id, off, and union(map_ptr, range) */ memset(((u8 *)reg) + sizeof(reg->type), 0, offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; reg->s32_min_value = (s32)imm; reg->s32_max_value = (s32)imm; reg->u32_min_value = (u32)imm; reg->u32_max_value = (u32)imm; } static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) { reg->var_off = tnum_const_subreg(reg->var_off, imm); reg->s32_min_value = (s32)imm; reg->s32_max_value = (s32)imm; reg->u32_min_value = (u32)imm; reg->u32_max_value = (u32)imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); reg->type = SCALAR_VALUE; } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(env, regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. 
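* For instance, a PTR_TO_PACKET freshly loaded from ctx->data has off == 0 and a constant-zero var_off, so it still qualifies; after an insn like 'r2 += 14' it no longer does.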
*/ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; reg->s32_min_value = S32_MIN; reg->s32_max_value = S32_MAX; reg->u32_min_value = 0; reg->u32_max_value = U32_MAX; } static void __mark_reg64_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } static void __mark_reg32_unbounded(struct bpf_reg_state *reg) { reg->s32_min_value = S32_MIN; reg->s32_max_value = S32_MAX; reg->u32_min_value = 0; reg->u32_max_value = U32_MAX; } static void __update_reg32_bounds(struct bpf_reg_state *reg) { struct tnum var32_off = tnum_subreg(reg->var_off); /* min signed is max(sign bit) | min(other bits) */ reg->s32_min_value = max_t(s32, reg->s32_min_value, var32_off.value | (var32_off.mask & S32_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->s32_max_value = min_t(s32, reg->s32_max_value, var32_off.value | (var32_off.mask & S32_MAX)); reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); reg->u32_max_value = min(reg->u32_max_value, (u32)(var32_off.value | var32_off.mask)); } static void __update_reg64_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } static void __update_reg_bounds(struct bpf_reg_state *reg) { __update_reg32_bounds(reg); __update_reg64_bounds(reg); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg32_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s32)reg->u32_max_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->s32_min_value = reg->u32_min_value; reg->s32_max_value = reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); } else if ((s32)reg->u32_min_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. */ reg->s32_min_value = reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); reg->s32_max_value = reg->u32_max_value; } } static void __reg64_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 
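* Concretely, for smin = -3 and smax = -1 every value in the range has the sign bit set, so interpreted as unsigned 64-bit values the range is exactly 0xfffffffffffffffd through 0xffffffffffffffff.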
*/ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. */ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } static void __reg_deduce_bounds(struct bpf_reg_state *reg) { __reg32_deduce_bounds(reg); __reg64_deduce_bounds(reg); } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { struct tnum var64_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off), tnum_range(reg->u32_min_value, reg->u32_max_value)); reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); } static void __reg_assign_32_into_64(struct bpf_reg_state *reg) { reg->umin_value = reg->u32_min_value; reg->umax_value = reg->u32_max_value; /* Attempt to pull 32-bit signed bounds into 64-bit bounds * but must be positive otherwise set to worst-case bounds * and refine later from tnum. */ if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) reg->smax_value = reg->s32_max_value; else reg->smax_value = U32_MAX; if (reg->s32_min_value >= 0) reg->smin_value = reg->s32_min_value; else reg->smin_value = 0; } static void __reg_combine_32_into_64(struct bpf_reg_state *reg) { /* special case when 64-bit register has its upper 32 bits * zeroed. Typically happens after zext or <<32, >>32 sequence * allowing us to use 32-bit bounds directly. */ if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) { __reg_assign_32_into_64(reg); } else { /* Otherwise the best we can do is push lower 32bit known and * unknown bits into register (var_off set from jmp logic) * then learn as much as possible from the 64-bit tnum * known and unknown bits. The previous smin/smax bounds are * invalid here because of jmp32 compare so mark them unknown * so they do not impact tnum bounds calculation. */ __mark_reg64_unbounded(reg); __update_reg_bounds(reg); } /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax.
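* (In that example tnum_intersect() ANDs the unknown-bit masks, 0xf...fc & 0x7f...f = 0x7f...fc, and the subsequent __update_reg_bounds() lowers umax to value | mask = 0x7f...fc.)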
*/ __reg_deduce_bounds(reg); __reg_bound_offset(reg); __update_reg_bounds(reg); } static bool __reg64_bound_s32(s64 a) { if (a > S32_MIN && a < S32_MAX) return true; return false; } static bool __reg64_bound_u32(u64 a) { if (a > U32_MIN && a < U32_MAX) return true; return false; } static void __reg_combine_64_into_32(struct bpf_reg_state *reg) { __mark_reg32_unbounded(reg); if (__reg64_bound_s32(reg->smin_value)) reg->s32_min_value = (s32)reg->smin_value; if (__reg64_bound_s32(reg->smax_value)) reg->s32_max_value = (s32)reg->smax_value; if (__reg64_bound_u32(reg->umin_value)) reg->u32_min_value = (u32)reg->umin_value; if (__reg64_bound_u32(reg->umax_value)) reg->u32_max_value = (u32)reg->umax_value; /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __reg_deduce_bounds(reg); __reg_bound_offset(reg); __update_reg_bounds(reg); } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { /* * Clear type, id, off, and union(map_ptr, range) and * padding between 'type' and union */ memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(env, regs + regno); return; } __mark_reg_unknown(env, regs + regno); } static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { __mark_reg_unknown(env, reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(env, regs + regno); return; } __mark_reg_not_init(env, regs + regno); } static void mark_btf_ld_reg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, enum bpf_reg_type reg_type, u32 btf_id) { if (reg_type == SCALAR_VALUE) { mark_reg_unknown(env, regs, regno); return; } mark_reg_known_zero(env, regs, regno); regs[regno].type = PTR_TO_BTF_ID; regs[regno].btf_id = btf_id; } #define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs; int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; regs[i].parent = NULL; regs[i].subreg_def = DEF_NOT_SUBREG; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; } #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is 
used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static int cmp_subprogs(const void *a, const void *b) { return ((struct bpf_subprog_info *)a)->start - ((struct bpf_subprog_info *)b)->start; } static int find_subprog(struct bpf_verifier_env *env, int off) { struct bpf_subprog_info *p; p = bsearch(&off, env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs); if (!p) return -ENOENT; return p - env->subprog_info; } static int add_subprog(struct bpf_verifier_env *env, int off) { int insn_cnt = env->prog->len; int ret; if (off >= insn_cnt || off < 0) { verbose(env, "call to invalid destination\n"); return -EINVAL; } ret = find_subprog(env, off); if (ret >= 0) return 0; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; } env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); return 0; } static int check_subprogs(struct bpf_verifier_env *env) { int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; /* Add entry function. */ ret = add_subprog(env, 0); if (ret < 0) return ret; /* determine subprog starts. The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; if (!env->bpf_capable) { verbose(env, "function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); return -EPERM; } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; } /* Add a fake 'exit' subprog which could simplify subprog iteration * logic. 'subprog_cnt' should not be increased. */ subprog[env->subprog_cnt].start = insn_cnt; if (env->log.level & BPF_LOG_LEVEL2) for (i = 0; i < env->subprog_cnt; i++) verbose(env, "func#%d @%d\n", i, subprog[i].start); /* now check that all jumps are within the same subprog */ subprog_start = subprog[cur_subprog].start; subprog_end = subprog[cur_subprog + 1].start; for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; off = i + insn[i].off + 1; if (off < subprog_start || off >= subprog_end) { verbose(env, "jump out of range from insn %d to %d\n", i, off); return -EINVAL; } next: if (i == subprog_end - 1) { /* to avoid fall-through from one subprog into another * the last insn of the subprog should be either exit * or unconditional jump back */ if (code != (BPF_JMP | BPF_EXIT) && code != (BPF_JMP | BPF_JA)) { verbose(env, "last insn is not an exit or jmp\n"); return -EINVAL; } subprog_start = subprog_end; cur_subprog++; if (cur_subprog < env->subprog_cnt) subprog_end = subprog[cur_subprog + 1].start; } } return 0; } /* Parentage chain of this register (or stack slot) should take care of all * issues like callee-saved registers, stack slot allocation time, etc. */ static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_reg_state *state, struct bpf_reg_state *parent, u8 flag) { bool writes = parent == state->parent; /* Observe write marks */ int cnt = 0; while (parent) { /* if read wasn't screened by an earlier write ... 
*/ if (writes && state->live & REG_LIVE_WRITTEN) break; if (parent->live & REG_LIVE_DONE) { verbose(env, "verifier BUG type %s var_off %lld off %d\n", reg_type_str[parent->type], parent->var_off.value, parent->off); return -EFAULT; } /* The first condition is more likely to be true than the * second, check it first. */ if ((parent->live & REG_LIVE_READ) == flag || parent->live & REG_LIVE_READ64) /* The parentage chain never changes and * this parent was already marked as LIVE_READ. * There is no need to keep walking the chain again and * keep re-marking all parents as LIVE_READ. * This case happens when the same register is read * multiple times without writes into it in-between. * Also, if parent has the stronger REG_LIVE_READ64 set, * then no need to set the weak REG_LIVE_READ32. */ break; /* ... then we depend on parent's value */ parent->live |= flag; /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ if (flag == REG_LIVE_READ64) parent->live &= ~REG_LIVE_READ32; state = parent; parent = state->parent; writes = true; cnt++; } if (env->longest_mark_read_walk < cnt) env->longest_mark_read_walk = cnt; return 0; } /* This function is supposed to be used by the following 32-bit optimization * code only. It returns TRUE if the source or destination register operates * on 64-bit, otherwise returns FALSE. */ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) { u8 code, class, op; code = insn->code; class = BPF_CLASS(code); op = BPF_OP(code); if (class == BPF_JMP) { /* BPF_EXIT for "main" will reach here. Return TRUE * conservatively. */ if (op == BPF_EXIT) return true; if (op == BPF_CALL) { /* BPF to BPF call will reach here because of marking * caller saved clobber with DST_OP_NO_MARK for which we * don't care about the register def because they are anyway * marked as NOT_INIT already. */ if (insn->src_reg == BPF_PSEUDO_CALL) return false; /* Helper call will reach here because of arg type * check, conservatively return TRUE. */ if (t == SRC_OP) return true; return false; } } if (class == BPF_ALU64 || class == BPF_JMP || /* BPF_END always uses BPF_ALU class. */ (class == BPF_ALU && op == BPF_END && insn->imm == 64)) return true; if (class == BPF_ALU || class == BPF_JMP32) return false; if (class == BPF_LDX) { if (t != SRC_OP) return BPF_SIZE(code) == BPF_DW; /* LDX source must be ptr. */ return true; } if (class == BPF_STX) { if (reg->type != SCALAR_VALUE) return true; return BPF_SIZE(code) == BPF_DW; } if (class == BPF_LD) { u8 mode = BPF_MODE(code); /* LD_IMM64 */ if (mode == BPF_IMM) return true; /* Both LD_IND and LD_ABS return 32-bit data. */ if (t != SRC_OP) return false; /* Implicit ctx ptr. */ if (regno == BPF_REG_6) return true; /* Explicit source could be any width. */ return true; } if (class == BPF_ST) /* The only source register for BPF_ST is a ptr. */ return true; /* Conservatively return true by default. */ return true; } /* Return TRUE if INSN doesn't have an explicit value definition. */ static bool insn_no_def(struct bpf_insn *insn) { u8 class = BPF_CLASS(insn->code); return (class == BPF_JMP || class == BPF_JMP32 || class == BPF_STX || class == BPF_ST); } /* Return TRUE if INSN has defined any 32-bit value explicitly.
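* e.g. a 32-bit BPF_ALU BPF_MOV writes only the low 32 bits of dst_reg, so it counts as a 32-bit definition, while its BPF_ALU64 counterpart defines the full 64-bit register and does not.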
*/ static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) { if (insn_no_def(insn)) return false; return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); } static void mark_insn_zext(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { s32 def_idx = reg->subreg_def; if (def_idx == DEF_NOT_SUBREG) return; env->insn_aux_data[def_idx - 1].zext_dst = true; /* The dst will be zero extended, so won't be sub-register anymore. */ reg->subreg_def = DEF_NOT_SUBREG; } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; struct bpf_reg_state *reg, *regs = state->regs; bool rw64; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } reg = &regs[regno]; rw64 = is_reg64(env, insn, regno, reg, t); if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (reg->type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } /* We don't need to worry about FP liveness because it's read-only */ if (regno == BPF_REG_FP) return 0; if (rw64) mark_insn_zext(env, reg); return mark_reg_read(env, reg, reg->parent, rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } reg->live |= REG_LIVE_WRITTEN; reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } /* for any branch, call, exit record the history of jmps in the given state */ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur) { u32 cnt = cur->jmp_history_cnt; struct bpf_idx_pair *p; cnt++; p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); if (!p) return -ENOMEM; p[cnt - 1].idx = env->insn_idx; p[cnt - 1].prev_idx = env->prev_insn_idx; cur->jmp_history = p; cur->jmp_history_cnt = cnt; return 0; } /* Backtrack one insn at a time. If idx is not at the top of recorded * history then previous instruction came from straight line execution. */ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, u32 *history) { u32 cnt = *history; if (cnt && st->jmp_history[cnt - 1].idx == i) { i = st->jmp_history[cnt - 1].prev_idx; (*history)--; } else { i--; } return i; } /* For given verifier state backtrack_insn() is called from the last insn to * the first insn. Its purpose is to compute a bitmask of registers and * stack slots that needs precision in the parent verifier state. 
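* For example, backtracking through 'r1 += r6' with r1 in the mask adds r6 to the mask as well, while backtracking through 'r1 = 42' clears r1 from the mask, since the constant itself is the precise source.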
*/ static int backtrack_insn(struct bpf_verifier_env *env, int idx, u32 *reg_mask, u64 *stack_mask) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, .private_data = env, }; struct bpf_insn *insn = env->prog->insnsi + idx; u8 class = BPF_CLASS(insn->code); u8 opcode = BPF_OP(insn->code); u8 mode = BPF_MODE(insn->code); u32 dreg = 1u << insn->dst_reg; u32 sreg = 1u << insn->src_reg; u32 spi; if (insn->code == 0) return 0; if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); verbose(env, "%d: ", idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); } if (class == BPF_ALU || class == BPF_ALU64) { if (!(*reg_mask & dreg)) return 0; if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { /* dreg = sreg * dreg needs precision after this insn * sreg needs precision before this insn */ *reg_mask &= ~dreg; *reg_mask |= sreg; } else { /* dreg = K * dreg needs precision after this insn. * Corresponding register is already marked * as precise=true in this verifier state. * No further markings in parent are necessary */ *reg_mask &= ~dreg; } } else { if (BPF_SRC(insn->code) == BPF_X) { /* dreg += sreg * both dreg and sreg need precision * before this insn */ *reg_mask |= sreg; } /* else dreg += K * dreg still needs precision before this insn */ } } else if (class == BPF_LDX) { if (!(*reg_mask & dreg)) return 0; *reg_mask &= ~dreg; /* scalars can only be spilled into stack w/o losing precision. * Load from any other memory can be zero extended. * The desire to keep that precision is already indicated * by 'precise' mark in corresponding register of this state. * No further tracking necessary. */ if (insn->src_reg != BPF_REG_FP) return 0; if (BPF_SIZE(insn->code) != BPF_DW) return 0; /* dreg = *(u64 *)[fp - off] was a fill from the stack. * that [fp - off] slot contains scalar that needs to be * tracked with precision */ spi = (-insn->off - 1) / BPF_REG_SIZE; if (spi >= 64) { verbose(env, "BUG spi %d\n", spi); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } *stack_mask |= 1ull << spi; } else if (class == BPF_STX || class == BPF_ST) { if (*reg_mask & dreg) /* stx & st shouldn't be using _scalar_ dst_reg * to access memory. It means backtracking * encountered a case of pointer subtraction. */ return -ENOTSUPP; /* scalars can only be spilled into stack */ if (insn->dst_reg != BPF_REG_FP) return 0; if (BPF_SIZE(insn->code) != BPF_DW) return 0; spi = (-insn->off - 1) / BPF_REG_SIZE; if (spi >= 64) { verbose(env, "BUG spi %d\n", spi); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } if (!(*stack_mask & (1ull << spi))) return 0; *stack_mask &= ~(1ull << spi); if (class == BPF_STX) *reg_mask |= sreg; } else if (class == BPF_JMP || class == BPF_JMP32) { if (opcode == BPF_CALL) { if (insn->src_reg == BPF_PSEUDO_CALL) return -ENOTSUPP; /* regular helper call sets R0 */ *reg_mask &= ~1; if (*reg_mask & 0x3f) { /* if backtracking was looking for registers R1-R5 * they should have been found already. */ verbose(env, "BUG regs %x\n", *reg_mask); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } } else if (opcode == BPF_EXIT) { return -ENOTSUPP; } } else if (class == BPF_LD) { if (!(*reg_mask & dreg)) return 0; *reg_mask &= ~dreg; /* It's ld_imm64 or ld_abs or ld_ind. * For ld_imm64 no further tracking of precision * into parent is necessary */ if (mode == BPF_IND || mode == BPF_ABS) /* to be analyzed */ return -ENOTSUPP; } return 0; } /* the scalar precision tracking algorithm: * .
at the start all registers have precise=false. * . scalar ranges are tracked as normal through alu and jmp insns. * . once precise value of the scalar register is used in: * . ptr + scalar alu * . if (scalar cond K|scalar) * . helper_call(.., scalar, ...) where ARG_CONST is expected * backtrack through the verifier states and mark as precise all registers * and stack slots with spilled constants that these scalar registers * depend on. * . during state pruning two registers (or spilled stack slots) * are equivalent if both are not precise. * * Note the verifier cannot simply walk register parentage chain, * since many different registers and stack slots could have been * used to compute a single precise scalar. * * The approach of starting with precise=true for all registers and then * backtrack to mark a register as not precise when the verifier detects * that the program doesn't care about a specific value (e.g., when helper * takes register as ARG_ANYTHING parameter) is not safe. * * It's ok to walk single parentage chain of the verifier states. * It's possible that this backtracking will go all the way till the 1st insn. * All other branches will be explored for needing precision later. * * The backtracking needs to deal with cases like: * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) * r9 -= r8 * r5 = r9 * if r5 > 0x79f goto pc+7 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) * r5 += 1 * ... * call bpf_perf_event_output#25 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO * * and this case: * r6 = 1 * call foo // uses callee's r6 inside to compute r0 * r0 += r6 * if r0 == 0 goto * * to track the above, reg_mask/stack_mask needs to be independent for each frame. * * Also if parent's curframe > frame where backtracking started, * the verifier needs to mark registers in both frames, otherwise callees * may incorrectly prune callers. This is similar to * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") * * For now backtracking falls back into conservative marking. */ static void mark_all_scalars_precise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { struct bpf_func_state *func; struct bpf_reg_state *reg; int i, j; /* big hammer: mark all scalars precise in this path. * pop_stack may still get !precise scalars. */ for (; st; st = st->parent) for (i = 0; i <= st->curframe; i++) { func = st->frame[i]; for (j = 0; j < BPF_REG_FP; j++) { reg = &func->regs[j]; if (reg->type != SCALAR_VALUE) continue; reg->precise = true; } for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { if (func->stack[j].slot_type[0] != STACK_SPILL) continue; reg = &func->stack[j].spilled_ptr; if (reg->type != SCALAR_VALUE) continue; reg->precise = true; } } } static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, int spi) { struct bpf_verifier_state *st = env->cur_state; int first_idx = st->first_insn_idx; int last_idx = env->insn_idx; struct bpf_func_state *func; struct bpf_reg_state *reg; u32 reg_mask = regno >= 0 ? 1u << regno : 0; u64 stack_mask = spi >= 0 ?
1ull << spi : 0; bool skip_first = true; bool new_marks = false; int i, err; if (!env->bpf_capable) return 0; func = st->frame[st->curframe]; if (regno >= 0) { reg = &func->regs[regno]; if (reg->type != SCALAR_VALUE) { WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } if (!reg->precise) new_marks = true; else reg_mask = 0; reg->precise = true; } while (spi >= 0) { if (func->stack[spi].slot_type[0] != STACK_SPILL) { stack_mask = 0; break; } reg = &func->stack[spi].spilled_ptr; if (reg->type != SCALAR_VALUE) { stack_mask = 0; break; } if (!reg->precise) new_marks = true; else stack_mask = 0; reg->precise = true; break; } if (!new_marks) return 0; if (!reg_mask && !stack_mask) return 0; for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); for (i = last_idx;;) { if (skip_first) { err = 0; skip_first = false; } else { err = backtrack_insn(env, i, &reg_mask, &stack_mask); } if (err == -ENOTSUPP) { mark_all_scalars_precise(env, st); return 0; } else if (err) { return err; } if (!reg_mask && !stack_mask) /* Found assignment(s) into tracked register in this state. * Since this state is already marked, just return. * Nothing to be tracked further in the parent state. */ return 0; if (i == first_idx) break; i = get_prev_insn_idx(st, i, &history); if (i >= env->prog->len) { /* This can happen if backtracking reached insn 0 * and there are still reg_mask or stack_mask * to backtrack. * It means the backtracking missed the spot where * particular register was initialized with a constant. */ verbose(env, "BUG backtracking idx %d\n", i); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } } st = st->parent; if (!st) break; new_marks = false; func = st->frame[st->curframe]; bitmap_from_u64(mask, reg_mask); for_each_set_bit(i, mask, 32) { reg = &func->regs[i]; if (reg->type != SCALAR_VALUE) { reg_mask &= ~(1u << i); continue; } if (!reg->precise) new_marks = true; reg->precise = true; } bitmap_from_u64(mask, stack_mask); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { /* the sequence of instructions: * 2: (bf) r3 = r10 * 3: (7b) *(u64 *)(r3 -8) = r0 * 4: (79) r4 = *(u64 *)(r10 -8) * doesn't contain jmps. It's backtracked * as a single block. * During backtracking insn 3 is not recognized as * stack access, so at the end of backtracking * stack slot fp-8 is still marked in stack_mask. * However the parent state may not have accessed * fp-8 and it's "unallocated" stack space. * In such case fallback to conservative. */ mark_all_scalars_precise(env, st); return 0; } if (func->stack[i].slot_type[0] != STACK_SPILL) { stack_mask &= ~(1ull << i); continue; } reg = &func->stack[i].spilled_ptr; if (reg->type != SCALAR_VALUE) { stack_mask &= ~(1ull << i); continue; } if (!reg->precise) new_marks = true; reg->precise = true; } if (env->log.level & BPF_LOG_LEVEL) { print_verifier_state(env, func); verbose(env, "parent %s regs=%x stack=%llx marks\n", new_marks ? 
"didn't have" : "already had", reg_mask, stack_mask); } if (!reg_mask && !stack_mask) break; if (!new_marks) break; last_idx = st->last_insn_idx; first_idx = st->first_insn_idx; } return 0; } static int mark_chain_precision(struct bpf_verifier_env *env, int regno) { return __mark_chain_precision(env, regno, -1); } static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) { return __mark_chain_precision(env, -1, spi); } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BTF_ID_OR_NULL: case PTR_TO_RDONLY_BUF: case PTR_TO_RDONLY_BUF_OR_NULL: case PTR_TO_RDWR_BUF: case PTR_TO_RDWR_BUF_OR_NULL: return true; default: return false; } } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } static bool register_is_const(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static void save_register_state(struct bpf_func_state *state, int spi, struct bpf_reg_state *reg) { int i; state->stack[spi].spilled_ptr = *reg; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_func_state *state, /* func where register points to */ int off, int size, int value_regno, int insn_idx) { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; struct bpf_reg_state *reg = NULL; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), state->acquired_refs, true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0) reg = &cur->regs[value_regno]; if (reg && size == BPF_REG_SIZE && register_is_const(reg) && !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit * stack slot address like [fp - 8]. Other spill of * scalar via different register has to be conervative. * Backtrack from here and mark all registers as precise * that contributed into 'reg' being a constant. 
*/ err = mark_chain_precision(env, value_regno); if (err) return err; } save_register_state(state, spi, reg); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose_linfo(env, insn_idx, "; "); verbose(env, "invalid size of register spill\n"); return -EACCES; } if (state != cur && reg->type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } if (!env->bypass_spec_v4) { bool sanitize = false; if (state->stack[spi].slot_type[0] == STACK_SPILL && register_is_const(&state->stack[spi].spilled_ptr)) sanitize = true; for (i = 0; i < BPF_REG_SIZE; i++) if (state->stack[spi].slot_type[i] == STACK_MISC) { sanitize = true; break; } if (sanitize) { int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; int soff = (-spi - 1) * BPF_REG_SIZE; /* detected reuse of integer stack slot with a pointer * which means either llvm is reusing stack slot or * an attacker is trying to exploit CVE-2018-3639 * (speculative store bypass). * Have to sanitize that slot with a preemptive * store of zero. */ if (*poff && *poff != soff) { /* disallow programs where a single insn stores * into two different stack slots, since verifier * cannot sanitize them */ verbose(env, "insn %d cannot access two stack slots fp%d and fp%d", insn_idx, *poff, soff); return -EINVAL; } *poff = soff; } } save_register_state(state, spi, reg); } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ if (state->stack[spi].slot_type[0] == STACK_SPILL) for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_MISC; /* only mark the slot as written if all 8 bytes were written, * otherwise read propagation may incorrectly stop too soon * when stack slots are partially written. * This heuristic means that read propagation will be * conservative, since it will add reg_live_read marks * to stack slots all the way to the first state when a program * writes+reads less than 8 bytes */ if (size == BPF_REG_SIZE) state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; /* when we zero initialize stack slots mark them as such */ if (reg && register_is_null(reg)) { /* backtracking doesn't work for STACK_ZERO yet. */ err = mark_chain_precision(env, value_regno); if (err) return err; type = STACK_ZERO; } /* Mark slots affected by this stack write.
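* e.g. a 4-byte write to fp-8 has slot == 7, so slot_type indices 7 down to 4 of spi 0 become STACK_MISC (or STACK_ZERO when the written register is known to be zero).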
*/ for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; } return 0; } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; u8 *stype; if (reg_state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = reg_state->stack[spi].slot_type; reg = &reg_state->stack[spi].spilled_ptr; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); verbose(env, "invalid size of register fill\n"); return -EACCES; } if (value_regno >= 0) { mark_reg_unknown(env, state->regs, value_regno); state->regs[value_regno].live |= REG_LIVE_WRITTEN; } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); return 0; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = *reg; /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { /* If value_regno==-1, the caller is asking us whether * it is acceptable to use this value as a SCALAR_VALUE * (e.g. for XADD). * We must not allow unprivileged callers to do that * with spilled pointers. */ verbose(env, "leaking pointer from stack off %d\n", off); return -EACCES; } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { int zeros = 0; for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) continue; if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { zeros++; continue; } verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, * so the whole register == const_zero */ __mark_reg_const_zero(&state->regs[value_regno]); /* backtracking doesn't support STACK_ZERO yet, * so mark it precise here, so that later * backtracking can stop here. * Backtracking may not need this if this register * doesn't participate in pointer adjustment. * Forward propagation of precise flag is not * necessary either. This mark is only to stop * backtracking. Any register that contributed * to const 0 was marked precise before spill. */ state->regs[value_regno].precise = true; } else { /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); } state->regs[value_regno].live |= REG_LIVE_WRITTEN; } } return 0; } static int check_stack_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { /* Stack accesses must be at a fixed offset, so that we * can determine what type of data were returned. See * check_stack_read(). 
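* e.g. *(u64 *)(r10 - 8) is accepted, while an address computed as 'r2 = r10; r2 += r3' with a non-constant r3 is rejected below.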
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d\n", tn_buf, off, size); return -EACCES; } if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } return 0; } static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, int off, int size, enum bpf_access_type type) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; u32 cap = bpf_map_flags_to_cap(map); if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ static int __check_mem_access(struct bpf_verifier_env *env, int regno, int off, int size, u32 mem_size, bool zero_size_allowed) { bool size_ok = size > 0 || (size == 0 && zero_size_allowed); struct bpf_reg_state *reg; if (off >= 0 && size_ok && (u64)off + size <= mem_size) return 0; reg = &cur_regs(env)[regno]; switch (reg->type) { case PTR_TO_MAP_VALUE: verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", mem_size, off, size); break; case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, off, mem_size); break; case PTR_TO_MEM: default: verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", mem_size, off, size); } return -EACCES; } /* check read/write into a memory region with possible variable offset */ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, int off, int size, u32 mem_size, bool zero_size_allowed) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register pointing to memory region, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level & BPF_LOG_LEVEL) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * indexes, we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0 && (reg->smin_value == S64_MIN || (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || reg->smin_value + off < 0)) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_mem_access(env, regno, reg->smin_value + off, size, mem_size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the allowed memory range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too.
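* (A umax_value at or above BPF_MAX_VAR_OFF doubles as the "no max value set" marker checked below.)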
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", regno); return -EACCES; } err = __check_mem_access(env, regno, reg->umax_value + off, size, mem_size, zero_size_allowed); if (err) { verbose(env, "R%d max value is outside of the allowed memory range\n", regno); return err; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg = &state->regs[regno]; struct bpf_map *map = reg->map_ptr; int err; err = check_mem_region_access(env, regno, off, size, map->value_size, zero_size_allowed); if (err) return err; if (map_value_has_spin_lock(map)) { u32 lock = map->spin_lock_off; /* if any part of struct bpf_spin_lock can be touched by * load/store reject this program. * To check that [x1, x2) overlaps with [y1, y2) * it is sufficient to check x1 < y2 && y1 < x2. */ if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && lock < reg->umax_value + off + size) { verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); return -EACCES; } } return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SKB: if (t == BPF_WRITE) return false; /* fallthrough */ /* Program types with direct read + write access go here! */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; case BPF_PROG_TYPE_CGROUP_SOCKOPT: if (t == BPF_WRITE) env->seen_direct_write = true; return true; default: return false; } } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_mem_access(env, regno, off, size, reg->range, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } /* __check_mem_access has made sure "off + size - 1" is within u16. * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, * otherwise find_good_pkt_pointers would have refused to set range info * that __check_mem_access would have rejected this pkt access. * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 
*/ env->prog->aux->max_pkt_offset = max_t(u32, env->prog->aux->max_pkt_offset, off + reg->umax_value + size - 1); return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type, u32 *btf_id) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, .log = &env->log, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, env->prog, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. */ *reg_type = info.reg_type; if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) *btf_id = info.btf_id; else env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static int check_flow_keys_access(struct bpf_verifier_env *env, int off, int size) { if (size < 0 || off < 0 || (u64)off + size > sizeof(struct bpf_flow_keys)) { verbose(env, "invalid access to flow keys off=%d size=%d\n", off, size); return -EACCES; } return 0; } static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int size, enum bpf_access_type t) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; struct bpf_insn_access_aux info = {}; bool valid; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } switch (reg->type) { case PTR_TO_SOCK_COMMON: valid = bpf_sock_common_is_valid_access(off, size, t, &info); break; case PTR_TO_SOCKET: valid = bpf_sock_is_valid_access(off, size, t, &info); break; case PTR_TO_TCP_SOCK: valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); break; case PTR_TO_XDP_SOCK: valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); break; default: valid = false; } if (valid) { env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; return 0; } verbose(env, "R%d invalid %s access off=%d size=%d\n", regno, reg_type_str[reg->type], off, size); return -EACCES; } static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) { return cur_regs(env) + regno; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return reg->type == PTR_TO_CTX; } static bool is_sk_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return type_is_sk_pointer(reg->type); } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); return type_is_pkt_pointer(reg->type); } static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = reg_state(env, regno); /* Separate to is_ctx_reg() since we still want to allow 
BPF_ST here. */ return reg->type == PTR_TO_FLOW_KEYS; } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict_alignment_once) { bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_FLOW_KEYS: pointer_desc = "flow keys "; break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. */ strict = true; break; case PTR_TO_SOCKET: pointer_desc = "sock "; break; case PTR_TO_SOCK_COMMON: pointer_desc = "sock_common "; break; case PTR_TO_TCP_SOCK: pointer_desc = "tcp_sock "; break; case PTR_TO_XDP_SOCK: pointer_desc = "xdp_sock "; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { u16 stack = env->subprog_info[func->subprogno].stack_depth; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_info[func->subprogno].stack_depth = -off; return 0; } /* starting from main bpf function walk all instructions of the function * and recursively walk all callees that given function can call. * Ignore jump and exit insns. 
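 * As an illustration (hypothetical call chain): if main uses 64 bytes of
 * stack and calls f1 (96 bytes), which in turn calls f2 (128 bytes), the
 * walk below accumulates 64 + 96 + 128 = 288 bytes and checks that
 * against MAX_BPF_STACK (512) before accepting the program.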
* Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ static int check_max_stack_depth(struct bpf_verifier_env *env) { int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; process_func: /* round up to 32-bytes, since this is granularity * of interpreter stack size */ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", frame + 1, depth); return -EACCES; } continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ i = i + insn[i].imm + 1; idx = find_subprog(env, i); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i); return -EFAULT; } frame++; if (frame >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep !\n", frame); return -E2BIG; } goto process_func; } /* end of for() loop means the last insn of the 'subprog' * was reached. Doesn't matter whether it was JA or EXIT */ if (frame == 0) return 0; depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; goto continue_func; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx) { int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start); return -EFAULT; } return env->subprog_info[subprog].stack_depth; } #endif int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. 
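 *
 * For example (a hypothetical program): "r2 = r1; r2 += 8" followed by a
 * load through r2 is rejected below, because r2 now carries a non-zero
 * fixed offset from the original ctx pointer.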
	 */
	if (reg->off) {
		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
			regno, reg->off);
		return -EACCES;
	}

	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
		return -EACCES;
	}

	return 0;
}

static int __check_buffer_access(struct bpf_verifier_env *env,
				 const char *buf_info,
				 const struct bpf_reg_state *reg,
				 int regno, int off, int size)
{
	if (off < 0) {
		verbose(env,
			"R%d invalid %s buffer access: off=%d, size=%d\n",
			regno, buf_info, off, size);
		return -EACCES;
	}
	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
			regno, off, tn_buf);
		return -EACCES;
	}

	return 0;
}

static int check_tp_buffer_access(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  int regno, int off, int size)
{
	int err;

	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
	if (err)
		return err;

	if (off + size > env->prog->aux->max_tp_access)
		env->prog->aux->max_tp_access = off + size;

	return 0;
}

static int check_buffer_access(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg,
			       int regno, int off, int size,
			       bool zero_size_allowed,
			       const char *buf_info,
			       u32 *max_access)
{
	int err;

	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
	if (err)
		return err;

	if (off + size > *max_access)
		*max_access = off + size;

	return 0;
}

/* BPF architecture zero extends alu32 ops into 64-bit registers */
static void zext_32_to_64(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_subreg(reg->var_off);
	__reg_assign_32_into_64(reg);
}

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;

	/* If size is smaller than 32bit register the 32bit register
	 * values are also truncated so we push 64-bit bounds into
	 * 32-bit bounds. Above were truncated < 32-bits already.
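	 *
	 * E.g. (hypothetical bounds): after coercing to size == 2, a register
	 * whose old unsigned range straddled the 16-bit mask ends up with the
	 * conservative range [0, 0xffff], and the 32-bit sub-bounds are then
	 * rebuilt from the 64-bit ones below.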
*/ if (size >= 4) return; __reg_combine_64_into_32(reg); } static bool bpf_map_is_rdonly(const struct bpf_map *map) { return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; } static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) { void *ptr; u64 addr; int err; err = map->ops->map_direct_value_addr(map, &addr, off); if (err) return err; ptr = (void *)(long)addr + off; switch (size) { case sizeof(u8): *val = (u64)*(u8 *)ptr; break; case sizeof(u16): *val = (u64)*(u16 *)ptr; break; case sizeof(u32): *val = (u64)*(u32 *)ptr; break; case sizeof(u64): *val = *(u64 *)ptr; break; default: return -EINVAL; } return 0; } static int check_ptr_to_btf_access(struct bpf_verifier_env *env, struct bpf_reg_state *regs, int regno, int off, int size, enum bpf_access_type atype, int value_regno) { struct bpf_reg_state *reg = regs + regno; const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); u32 btf_id; int ret; if (off < 0) { verbose(env, "R%d is ptr_%s invalid negative access: off=%d\n", regno, tname, off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", regno, tname, off, tn_buf); return -EACCES; } if (env->ops->btf_struct_access) { ret = env->ops->btf_struct_access(&env->log, t, off, size, atype, &btf_id); } else { if (atype != BPF_READ) { verbose(env, "only read is supported\n"); return -EACCES; } ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); } if (ret < 0) return ret; if (atype == BPF_READ && value_regno >= 0) mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); return 0; } static int check_ptr_to_map_access(struct bpf_verifier_env *env, struct bpf_reg_state *regs, int regno, int off, int size, enum bpf_access_type atype, int value_regno) { struct bpf_reg_state *reg = regs + regno; struct bpf_map *map = reg->map_ptr; const struct btf_type *t; const char *tname; u32 btf_id; int ret; if (!btf_vmlinux) { verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); return -ENOTSUPP; } if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { verbose(env, "map_ptr access not supported for map type %d\n", map->map_type); return -ENOTSUPP; } t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); tname = btf_name_by_offset(btf_vmlinux, t->name_off); if (!env->allow_ptr_to_map_access) { verbose(env, "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", tname); return -EPERM; } if (off < 0) { verbose(env, "R%d is %s invalid negative access: off=%d\n", regno, tname, off); return -EACCES; } if (atype != BPF_READ) { verbose(env, "only read from %s is supported\n", tname); return -EACCES; } ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); if (ret < 0) return ret; if (value_regno >= 0) mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); return 0; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno, bool 
strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; struct bpf_func_state *state; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access_type(env, regno, off, size, t); if (err) return err; err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) { struct bpf_map *map = reg->map_ptr; /* if map is read-only, track its contents as scalars */ if (tnum_is_const(reg->var_off) && bpf_map_is_rdonly(map) && map->ops->map_direct_value_addr) { int map_off = off + reg->var_off.value; u64 val = 0; err = bpf_map_direct_read(map, map_off, size, &val); if (err) return err; regs[value_regno].type = SCALAR_VALUE; __mark_reg_known(&regs[value_regno], val); } else { mark_reg_unknown(env, regs, value_regno); } } } else if (reg->type == PTR_TO_MEM) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into mem\n", value_regno); return -EACCES; } err = check_mem_region_access(env, regno, off, size, reg->mem_size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; u32 btf_id = 0; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } err = check_ctx_reg(env, reg, regno); if (err < 0) return err; err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id); if (err) verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) { mark_reg_unknown(env, regs, value_regno); } else { mark_reg_known_zero(env, regs, value_regno); if (reg_type_may_be_null(reg_type)) regs[value_regno].id = ++env->id_gen; /* A load of ctx field could have different * actual load size with the one encoded in the * insn. When the dst is PTR, it is for sure not * a sub-register. 
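				 *
				 * E.g. a 1-byte ctx read may later be
				 * rewritten into a wider load plus mask
				 * (the narrow-access rewrite); if the
				 * destination instead holds a pointer such
				 * as PTR_TO_PACKET, it must be treated as
				 * a full 64-bit register.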
*/ regs[value_regno].subreg_def = DEF_NOT_SUBREG; if (reg_type == PTR_TO_BTF_ID || reg_type == PTR_TO_BTF_ID_OR_NULL) regs[value_regno].btf_id = btf_id; } regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { off += reg->var_off.value; err = check_stack_access(env, reg, off, size); if (err) return err; state = func(env, reg); err = update_stack_depth(env, state, off); if (err) return err; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno, insn_idx); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_FLOW_KEYS) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into flow keys\n", value_regno); return -EACCES; } err = check_flow_keys_access(env, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (type_is_sk_pointer(reg->type)) { if (t == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", regno, reg_type_str[reg->type]); return -EACCES; } err = check_sock_access(env, insn_idx, regno, off, size, t); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_TP_BUFFER) { err = check_tp_buffer_access(env, reg, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_BTF_ID) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); } else if (reg->type == CONST_PTR_TO_MAP) { err = check_ptr_to_map_access(env, regs, regno, off, size, t, value_regno); } else if (reg->type == PTR_TO_RDONLY_BUF) { if (t == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", regno, reg_type_str[reg->type]); return -EACCES; } err = check_buffer_access(env, reg, regno, off, size, false, "rdonly", &env->prog->aux->max_rdonly_access); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_RDWR_BUF) { err = check_buffer_access(env, reg, regno, off, size, false, "rdwr", &env->prog->aux->max_rdwr_access); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", 
insn->src_reg); return -EACCES; } if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg) || is_flow_key_reg(env, insn->dst_reg) || is_sk_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true); } static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, int off, int access_size, bool zero_size_allowed) { struct bpf_reg_state *reg = reg_state(env, regno); if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { if (tnum_is_const(reg->var_off)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); } else { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", regno, tn_buf, access_size); } return -EACCES; } return 0; } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. */ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *reg = reg_state(env, regno); struct bpf_func_state *state = func(env, reg); int err, min_off, max_off, i, j, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(reg)) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[reg->type], reg_type_str[PTR_TO_STACK]); return -EACCES; } if (tnum_is_const(reg->var_off)) { min_off = max_off = reg->var_off.value + reg->off; err = __check_stack_boundary(env, regno, min_off, access_size, zero_size_allowed); if (err) return err; } else { /* Variable offset is prohibited for unprivileged mode for * simplicity since it requires corresponding support in * Spectre masking for stack ALU. * See also retrieve_ptr_limit(). */ if (!env->bypass_spec_v1) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", regno, tn_buf); return -EACCES; } /* Only initialized buffer on stack is allowed to be accessed * with variable offset. With uninitialized buffer it's hard to * guarantee that whole memory is marked as initialized on * helper return since specific bounds are unknown what may * cause uninitialized stack leaking. 
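		 *
		 * Sketch of the risk (hypothetical): a helper asked to fill N
		 * bytes at a variable stack offset might touch [-24, -16) on
		 * one path and [-16, -8) on another; marking both ranges as
		 * initialized would let a later read leak whichever range the
		 * helper did not actually write.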
*/ if (meta && meta->raw_mode) meta = NULL; if (reg->smax_value >= BPF_MAX_VAR_OFF || reg->smax_value <= -BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded indirect variable offset stack access\n", regno); return -EACCES; } min_off = reg->smin_value + reg->off; max_off = reg->smax_value + reg->off; err = __check_stack_boundary(env, regno, min_off, access_size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of stack bound\n", regno); return err; } err = __check_stack_boundary(env, regno, max_off, access_size, zero_size_allowed); if (err) { verbose(env, "R%d max value is outside of stack bound\n", regno); return err; } } if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = min_off; i < max_off + access_size; i++) { u8 *stype; slot = -i - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; if (*stype == STACK_MISC) goto mark; if (*stype == STACK_ZERO) { /* helper can write anything into the stack */ *stype = STACK_MISC; goto mark; } if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) goto mark; if (state->stack[spi].slot_type[0] == STACK_SPILL && state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) state->stack[spi].slot_type[j] = STACK_MISC; goto mark; } err: if (tnum_is_const(reg->var_off)) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", min_off, i - min_off, access_size); } else { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", tn_buf, i - min_off, access_size); } return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ mark_reg_read(env, &state->stack[spi].spilled_ptr, state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); } return update_stack_depth(env, state, min_off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: if (check_map_access_type(env, regno, reg->off, access_size, meta && meta->raw_mode ? BPF_WRITE : BPF_READ)) return -EACCES; return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MEM: return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed); case PTR_TO_RDONLY_BUF: if (meta && meta->raw_mode) return -EACCES; return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, "rdonly", &env->prog->aux->max_rdonly_access); case PTR_TO_RDWR_BUF: return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, "rdwr", &env->prog->aux->max_rdwr_access); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } /* Implementation details: * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL * Two bpf_map_lookups (even with the same key) will have different reg->id. 
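 * (E.g. two back-to-back lookups of the same key are still tracked under
 * two distinct ids.)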
* For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after * value_or_null->value transition, since the verifier only cares about * the range of access to valid map value pointer and doesn't care about actual * address of the map element. * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps * reg->id > 0 after value_or_null->value transition. By doing so * two bpf_map_lookups will be considered two different pointers that * point to different bpf_spin_locks. * The verifier allows taking only one bpf_spin_lock at a time to avoid * dead-locks. * Since only one bpf_spin_lock is allowed the checks are simpler than * reg_is_refcounted() logic. The verifier needs to remember only * one spin_lock instead of array of acquired_refs. * cur_state->active_spin_lock remembers which map value element got locked * and clears it after bpf_spin_unlock. */ static int process_spin_lock(struct bpf_verifier_env *env, int regno, bool is_lock) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; struct bpf_verifier_state *cur = env->cur_state; bool is_const = tnum_is_const(reg->var_off); struct bpf_map *map = reg->map_ptr; u64 val = reg->var_off.value; if (reg->type != PTR_TO_MAP_VALUE) { verbose(env, "R%d is not a pointer to map_value\n", regno); return -EINVAL; } if (!is_const) { verbose(env, "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", regno); return -EINVAL; } if (!map->btf) { verbose(env, "map '%s' has to have BTF in order to use bpf_spin_lock\n", map->name); return -EINVAL; } if (!map_value_has_spin_lock(map)) { if (map->spin_lock_off == -E2BIG) verbose(env, "map '%s' has more than one 'struct bpf_spin_lock'\n", map->name); else if (map->spin_lock_off == -ENOENT) verbose(env, "map '%s' doesn't have 'struct bpf_spin_lock'\n", map->name); else verbose(env, "map '%s' is not a struct type or bpf_spin_lock is mangled\n", map->name); return -EINVAL; } if (map->spin_lock_off != val + reg->off) { verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", val + reg->off); return -EINVAL; } if (is_lock) { if (cur->active_spin_lock) { verbose(env, "Locking two bpf_spin_locks are not allowed\n"); return -EINVAL; } cur->active_spin_lock = reg->id; } else { if (!cur->active_spin_lock) { verbose(env, "bpf_spin_unlock without taking a lock\n"); return -EINVAL; } if (cur->active_spin_lock != reg->id) { verbose(env, "bpf_spin_unlock of different lock\n"); return -EINVAL; } cur->active_spin_lock = 0; } return 0; } static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || type == ARG_PTR_TO_MEM_OR_NULL || type == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) { return type == ARG_CONST_SIZE || type == ARG_CONST_SIZE_OR_ZERO; } static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_ALLOC_MEM || type == ARG_PTR_TO_ALLOC_MEM_OR_NULL; } static bool arg_type_is_alloc_size(enum bpf_arg_type type) { return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; } static bool arg_type_is_int_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_INT || type == ARG_PTR_TO_LONG; } static int int_ptr_type_to_size(enum bpf_arg_type type) { if (type == ARG_PTR_TO_INT) return sizeof(u32); else if (type == ARG_PTR_TO_LONG) return sizeof(u64); return -EINVAL; } static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta, const struct bpf_func_proto *fn) { u32 regno = BPF_REG_1 + arg; struct bpf_reg_state *regs = cur_regs(env), 
*reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; enum bpf_arg_type arg_type = fn->arg_type[arg]; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { expected_type = PTR_TO_STACK; if (register_is_null(reg) && arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO || arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX || arg_type == ARG_PTR_TO_CTX_OR_NULL) { expected_type = PTR_TO_CTX; if (!(register_is_null(reg) && arg_type == ARG_PTR_TO_CTX_OR_NULL)) { if (type != expected_type) goto err_type; err = check_ctx_reg(env, reg, regno); if (err < 0) return err; } } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { expected_type = PTR_TO_SOCK_COMMON; /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ if (!type_is_sk_pointer(type)) goto err_type; if (reg->ref_obj_id) { if (meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", regno, reg->ref_obj_id, meta->ref_obj_id); return -EFAULT; } meta->ref_obj_id = reg->ref_obj_id; } } else if (arg_type == ARG_PTR_TO_SOCKET || arg_type == ARG_PTR_TO_SOCKET_OR_NULL) { expected_type = PTR_TO_SOCKET; if (!(register_is_null(reg) && arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) { if (type != expected_type) goto err_type; } } else if (arg_type == ARG_PTR_TO_BTF_ID) { expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; if (!fn->check_btf_id) { if (reg->btf_id != meta->btf_id) { verbose(env, "Helper has type %s got %s in R%d\n", kernel_type_name(meta->btf_id), kernel_type_name(reg->btf_id), regno); return -EACCES; } } else if (!fn->check_btf_id(reg->btf_id, arg)) { verbose(env, "Helper does not support %s in R%d\n", kernel_type_name(reg->btf_id), regno); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) { verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", regno); return -EACCES; } } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { if (meta->func_id == BPF_FUNC_spin_lock) { if (process_spin_lock(env, regno, true)) return -EACCES; } else if (meta->func_id == BPF_FUNC_spin_unlock) { if (process_spin_lock(env, regno, false)) return -EACCES; } else { verbose(env, "verifier internal error\n"); return -EFAULT; } } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. 
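		 * E.g. (hypothetical call) passing a literal NULL together
		 * with size 0 to a helper whose argument is
		 * ARG_PTR_TO_MEM_OR_NULL is accepted, since
		 * check_stack_boundary() allows a zero-byte read from NULL.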
*/ if (register_is_null(reg) && (arg_type == ARG_PTR_TO_MEM_OR_NULL || arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != PTR_TO_MEM && type != PTR_TO_RDONLY_BUF && type != PTR_TO_RDWR_BUF && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else if (arg_type_is_alloc_mem_ptr(arg_type)) { expected_type = PTR_TO_MEM; if (register_is_null(reg) && arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (type != expected_type) goto err_type; if (meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", regno, reg->ref_obj_id, meta->ref_obj_id); return -EFAULT; } meta->ref_obj_id = reg->ref_obj_id; } else if (arg_type_is_int_ptr(arg_type)) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE || (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && !register_is_null(reg)) || arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, meta); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* This is used to refine r0 return value bounds for helpers * that enforce this value as an upper bound on return values. * See do_refine_retval_range() for helpers that can refine * the return value. C type of helper is u32 so we pull register * bound from umax_value however, if negative verifier errors * out. Only upper bounds can be learned because retval is an * int type and negative retvals are allowed. */ meta->msize_max_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. 
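			 * E.g. if the size register is only known to lie in
			 * [0, 64], all 64 bytes must already be initialized
			 * before the call, because the helper might write
			 * fewer bytes than the verifier has to assume.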
*/ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); if (!err) err = mark_chain_precision(env, regno); } else if (arg_type_is_alloc_size(arg_type)) { if (!tnum_is_const(reg->var_off)) { verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } meta->mem_size = reg->var_off.value; } else if (arg_type_is_int_ptr(arg_type)) { int size = int_ptr_type_to_size(arg_type); err = check_helper_mem_access(env, regno, size, false, meta); if (err) return err; err = check_ptr_alignment(env, reg, 0, size, true); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... */ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_skb_output && func_id != BPF_FUNC_perf_event_read_value && func_id != BPF_FUNC_xdp_output) goto error; break; case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && func_id != BPF_FUNC_ringbuf_submit && func_id != BPF_FUNC_ringbuf_discard && func_id != BPF_FUNC_ringbuf_query) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; case BPF_MAP_TYPE_DEVMAP: case BPF_MAP_TYPE_DEVMAP_HASH: if (func_id != BPF_FUNC_redirect_map && func_id != BPF_FUNC_map_lookup_elem) goto error; break; /* Restrict bpf side of cpumap and xskmap, open when use-cases * appear. 
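	 * (So e.g. a program trying bpf_map_lookup_elem() on a CPUMAP is
	 * rejected here rather than failing at run time.)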
*/ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map && func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map && func_id != BPF_FUNC_sk_select_reuseport && func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash && func_id != BPF_FUNC_sk_select_reuseport && func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; case BPF_MAP_TYPE_QUEUE: case BPF_MAP_TYPE_STACK: if (func_id != BPF_FUNC_map_peek_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_push_elem) goto error; break; case BPF_MAP_TYPE_SK_STORAGE: if (func_id != BPF_FUNC_sk_storage_get && func_id != BPF_FUNC_sk_storage_delete) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; if (env->subprog_cnt > 1) { verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); return -EINVAL; } break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: case BPF_FUNC_skb_output: case BPF_FUNC_xdp_output: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; break; case BPF_FUNC_sk_redirect_map: case BPF_FUNC_msg_redirect_map: case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sk_redirect_hash: case BPF_FUNC_msg_redirect_hash: case BPF_FUNC_sock_hash_update: if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_get_local_storage: if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_map_peek_elem: case BPF_FUNC_map_pop_elem: case BPF_FUNC_map_push_elem: if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) goto error; break; case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_delete: if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == 
ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; /* We only support one arg being in raw mode at the moment, * which is sufficient for the helper functions we have * right now. */ return count <= 1; } static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, enum bpf_arg_type arg_next) { return (arg_type_is_mem_ptr(arg_curr) && !arg_type_is_mem_size(arg_next)) || (!arg_type_is_mem_ptr(arg_curr) && arg_type_is_mem_size(arg_next)); } static bool check_arg_pair_ok(const struct bpf_func_proto *fn) { /* bpf_xxx(..., buf, len) call will access 'len' * bytes from memory 'buf'. Both arg types need * to be paired, so make sure there's no buggy * helper function specification. */ if (arg_type_is_mem_size(fn->arg1_type) || arg_type_is_mem_ptr(fn->arg5_type) || check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) return false; return true; } static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) { int count = 0; if (arg_type_may_be_refcounted(fn->arg1_type)) count++; if (arg_type_may_be_refcounted(fn->arg2_type)) count++; if (arg_type_may_be_refcounted(fn->arg3_type)) count++; if (arg_type_may_be_refcounted(fn->arg4_type)) count++; if (arg_type_may_be_refcounted(fn->arg5_type)) count++; /* A reference acquiring function cannot acquire * another refcounted ptr. */ if (may_be_acquire_function(func_id) && count) return false; /* We only support one arg being unreferenced at the moment, * which is sufficient for the helper functions we have right now. */ return count <= 1; } static int check_func_proto(const struct bpf_func_proto *fn, int func_id) { return check_raw_mode_ok(fn) && check_arg_pair_ok(fn) && check_refcount_ok(fn, func_id) ? 0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. */ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(env, reg); } } static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *vstate = env->cur_state; int i; for (i = 0; i <= vstate->curframe; i++) __clear_all_pkt_pointers(env, vstate->frame[i]); } static void release_reg_references(struct bpf_verifier_env *env, struct bpf_func_state *state, int ref_obj_id) { struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].ref_obj_id == ref_obj_id) mark_reg_unknown(env, regs, i); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg->ref_obj_id == ref_obj_id) __mark_reg_unknown(env, reg); } } /* The pointer with the specified id has released its reference to kernel * resources. Identify all copies of the same pointer and clear the reference. 
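 *
 * E.g. after "r6 = r0" following an acquiring helper such as
 * bpf_sk_lookup_tcp(), releasing the socket invalidates both r0 and r6,
 * since both carry the same ref_obj_id.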
*/ static int release_reference(struct bpf_verifier_env *env, int ref_obj_id) { struct bpf_verifier_state *vstate = env->cur_state; int err; int i; err = release_reference_state(cur_func(env), ref_obj_id); if (err) return err; for (i = 0; i <= vstate->curframe; i++) release_reg_references(env, vstate->frame[i], ref_obj_id); return 0; } static void clear_caller_saved_regs(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } } static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_info_aux *func_info_aux; struct bpf_func_state *caller, *callee; int i, err, subprog, target_insn; bool is_global = false; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", state->curframe + 2); return -E2BIG; } target_insn = *insn_idx + insn->imm; subprog = find_subprog(env, target_insn + 1); if (subprog < 0) { verbose(env, "verifier bug. No program starts at insn %d\n", target_insn + 1); return -EFAULT; } caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1); return -EFAULT; } func_info_aux = env->prog->aux->func_info_aux; if (func_info_aux) is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; err = btf_check_func_arg_match(env, subprog, caller->regs); if (err == -EFAULT) return err; if (is_global) { if (err) { verbose(env, "Caller passes invalid args into func#%d\n", subprog); return err; } else { if (env->log.level & BPF_LOG_LEVEL) verbose(env, "Func#%d is global and valid. Skipping.\n", subprog); clear_caller_saved_regs(env, caller->regs); /* All global functions return SCALAR_VALUE */ mark_reg_unknown(env, caller->regs, BPF_REG_0); /* continue with next insn after call */ return 0; } } callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; state->frame[state->curframe + 1] = callee; /* callee cannot access r0, r6 - r9 for reading and has to write * into its own stack before reading from it. * callee can read/write into caller's stack */ init_func_state(env, callee, /* remember the callsite, it will be used by bpf_exit */ *insn_idx /* callsite */, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); /* Transfer references to the callee */ err = transfer_reference_state(callee, caller); if (err) return err; /* copy r1 - r5 args that callee can access. 
The copy includes parent * pointers, which connects us up to the liveness chain */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; clear_caller_saved_regs(env, caller->regs); /* only increment it after check_reg_arg() finished */ state->curframe++; /* and go analyze first insn of the callee */ *insn_idx = target_insn; if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "caller:\n"); print_verifier_state(env, caller); verbose(env, "callee:\n"); print_verifier_state(env, callee); } return 0; } static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; int err; callee = state->frame[state->curframe]; r0 = &callee->regs[BPF_REG_0]; if (r0->type == PTR_TO_STACK) { /* technically it's ok to return caller's stack pointer * (or caller's caller's pointer) back to the caller, * since these pointers are valid. Only current stack * pointer will be invalid as soon as function exits, * but let's be conservative */ verbose(env, "cannot return stack pointer to the caller\n"); return -EINVAL; } state->curframe--; caller = state->frame[state->curframe]; /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; /* Transfer references to the caller */ err = transfer_reference_state(caller, callee); if (err) return err; *insn_idx = callee->callsite + 1; if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee); verbose(env, "to caller at %d:\n", *insn_idx); print_verifier_state(env, caller); } /* clear everything in the callee */ free_func_state(callee); state->frame[state->curframe + 1] = NULL; return 0; } static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str && func_id != BPF_FUNC_probe_read_kernel_str && func_id != BPF_FUNC_probe_read_user_str)) return; ret_reg->smax_value = meta->msize_max_value; ret_reg->s32_max_value = meta->msize_max_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); __update_reg_bounds(ret_reg); } static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; struct bpf_map *map = meta->map_ptr; if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_map_push_elem && func_id != BPF_FUNC_map_pop_elem && func_id != BPF_FUNC_map_peek_elem) return 0; if (map == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } /* In case of read-only, some additional restrictions * need to be applied in order to prevent altering the * state of the map from program side. 
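	 *
	 * E.g. bpf_map_update_elem() on a map created with BPF_F_RDONLY_PROG
	 * is rejected below with "write into map forbidden".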
*/ if ((map->map_flags & BPF_F_RDONLY_PROG) && (func_id == BPF_FUNC_map_delete_elem || func_id == BPF_FUNC_map_update_elem || func_id == BPF_FUNC_map_push_elem || func_id == BPF_FUNC_map_pop_elem)) { verbose(env, "write into map forbidden\n"); return -EACCES; } if (!BPF_MAP_PTR(aux->map_ptr_state)) bpf_map_ptr_store(aux, meta->map_ptr, !meta->map_ptr->bypass_spec_v1); else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, !meta->map_ptr->bypass_spec_v1); return 0; } static int record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; struct bpf_reg_state *regs = cur_regs(env), *reg; struct bpf_map *map = meta->map_ptr; struct tnum range; u64 val; int err; if (func_id != BPF_FUNC_tail_call) return 0; if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } range = tnum_range(0, map->max_entries - 1); reg = &regs[BPF_REG_3]; if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { bpf_map_key_store(aux, BPF_MAP_KEY_POISON); return 0; } err = mark_chain_precision(env, BPF_REG_3); if (err) return err; val = reg->var_off.value; if (bpf_map_key_unseen(aux)) bpf_map_key_store(aux, val); else if (!bpf_map_key_poisoned(aux) && bpf_map_key_immediate(aux) != val) bpf_map_key_store(aux, BPF_MAP_KEY_POISON); return 0; } static int check_reference_leak(struct bpf_verifier_env *env) { struct bpf_func_state *state = cur_func(env); int i; for (i = 0; i < state->acquired_refs; i++) { verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", state->refs[i].id, state->refs[i].insn_idx); } return state->acquired_refs ? -EINVAL : 0; } static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id, env->prog); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. 
*/ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; err = check_func_proto(fn, func_id); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } meta.func_id = func_id; /* check args */ for (i = 0; i < 5; i++) { if (!fn->check_btf_id) { err = btf_resolve_helper_id(&env->log, fn, i); if (err > 0) meta.btf_id = err; } err = check_func_arg(env, i, &meta, fn); if (err) return err; } err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; err = record_func_key(env, &meta, func_id, insn_idx); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false); if (err) return err; } if (func_id == BPF_FUNC_tail_call) { err = check_reference_leak(env); if (err) { verbose(env, "tail_call would lead to reference leak\n"); return err; } } else if (is_release_function(func_id)) { err = release_reference(env, meta.ref_obj_id); if (err) { verbose(env, "func %s#%d reference has not been acquired before\n", func_id_name(func_id), func_id); return err; } } regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, * this is required because get_local_storage() can't return an error. */ if (func_id == BPF_FUNC_get_local_storage && !register_is_null(&regs[BPF_REG_2])) { verbose(env, "get_local_storage() doesn't support non-zero flags\n"); return -EINVAL; } /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* helper call returns 64-bit value. 
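	 * R0 is therefore marked as a full 64-bit definition below, so the
	 * later zero-extension patching pass never treats it as a 32-bit
	 * sub-register.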
*/ regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; if (map_value_has_spin_lock(meta.map_ptr)) regs[BPF_REG_0].id = ++env->id_gen; } else { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; regs[BPF_REG_0].mem_size = meta.mem_size; } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { int ret_btf_id; mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; ret_btf_id = *fn->ret_btf_id; if (ret_btf_id == 0) { verbose(env, "invalid return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } regs[BPF_REG_0].btf_id = ret_btf_id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } if (is_ptr_cast_function(func_id)) { /* For release_reference() */ regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; } else if (is_acquire_function(func_id, meta.map_ptr)) { int id = acquire_reference_state(env, insn_idx); if (id < 0) return id; /* For mark_ptr_or_null_reg() */ regs[BPF_REG_0].id = id; /* For release_reference() */ regs[BPF_REG_0].ref_obj_id = id; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if ((func_id == BPF_FUNC_get_stack || func_id == BPF_FUNC_get_task_stack) && !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS err = get_callchain_buffers(sysctl_perf_event_max_stack); err_str = "cannot get callchain buffer for func %s#%d\n"; #else err = -ENOTSUPP; err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; #endif if (err) { verbose(env, err_str, func_id_name(func_id), func_id); return err; } env->prog->has_callchain_buf = true; } if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) env->prog->call_get_stack = true; if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add 
in u64, where overflow is well-defined */
    s64 res = (s64)((u64)a + (u64)b);

    if (b < 0)
        return res > a;
    return res < a;
}

static bool signed_add32_overflows(s32 a, s32 b)
{
    /* Do the add in u32, where overflow is well-defined */
    s32 res = (s32)((u32)a + (u32)b);

    if (b < 0)
        return res > a;
    return res < a;
}

static bool signed_sub_overflows(s64 a, s64 b)
{
    /* Do the sub in u64, where overflow is well-defined */
    s64 res = (s64)((u64)a - (u64)b);

    if (b < 0)
        return res < a;
    return res > a;
}

static bool signed_sub32_overflows(s32 a, s32 b)
{
    /* Do the sub in u32, where overflow is well-defined */
    s32 res = (s32)((u32)a - (u32)b);

    if (b < 0)
        return res < a;
    return res > a;
}

static bool check_reg_sane_offset(struct bpf_verifier_env *env,
                  const struct bpf_reg_state *reg,
                  enum bpf_reg_type type)
{
    bool known = tnum_is_const(reg->var_off);
    s64 val = reg->var_off.value;
    s64 smin = reg->smin_value;

    if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
        verbose(env, "math between %s pointer and %lld is not allowed\n",
            reg_type_str[type], val);
        return false;
    }

    if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
        verbose(env, "%s pointer offset %d is not allowed\n",
            reg_type_str[type], reg->off);
        return false;
    }

    if (smin == S64_MIN) {
        verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
            reg_type_str[type]);
        return false;
    }

    if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
        verbose(env, "value %lld makes %s pointer be out of bounds\n",
            smin, reg_type_str[type]);
        return false;
    }

    return true;
}

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
    return &env->insn_aux_data[env->insn_idx];
}

static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
                  u32 *ptr_limit, u8 opcode, bool off_is_neg)
{
    bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
                (opcode == BPF_SUB && !off_is_neg);
    u32 off;

    switch (ptr_reg->type) {
    case PTR_TO_STACK:
        /* Indirect variable offset stack access is prohibited in
         * unprivileged mode so it's not handled here.
         */
        off = ptr_reg->off + ptr_reg->var_off.value;
        if (mask_to_left)
            *ptr_limit = MAX_BPF_STACK + off;
        else
            *ptr_limit = -off;
        return 0;
    case PTR_TO_MAP_VALUE:
        if (mask_to_left) {
            *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
        } else {
            off = ptr_reg->smin_value + ptr_reg->off;
            *ptr_limit = ptr_reg->map_ptr->value_size - off;
        }
        return 0;
    default:
        return -EINVAL;
    }
}

static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
                    const struct bpf_insn *insn)
{
    return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
}

static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
                       u32 alu_state, u32 alu_limit)
{
    /* If we arrived here from different branches with different
     * state or limits to sanitize, then this won't work.
     */
    if (aux->alu_state &&
        (aux->alu_state != alu_state ||
         aux->alu_limit != alu_limit))
        return -EACCES;

    /* Corresponding fixup done in fixup_bpf_calls().
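     * Illustrative note (hypothetical numbers): for a PTR_TO_STACK
     * pointer at off = -16 being moved by an unknown scalar,
     * retrieve_ptr_limit() above yields a limit of 16 in the
     * non-masked direction, which the patched instruction sequence
     * later uses to clamp speculatively out-of-range offsets.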
*/ aux->alu_state = alu_state; aux->alu_limit = alu_limit; return 0; } static int sanitize_val_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_insn_aux_data *aux = cur_aux(env); if (can_skip_alu_sanitation(env, insn)) return 0; return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); } static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, struct bpf_reg_state *dst_reg, bool off_is_neg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_insn_aux_data *aux = cur_aux(env); bool ptr_is_dst_reg = ptr_reg == dst_reg; u8 opcode = BPF_OP(insn->code); u32 alu_state, alu_limit; struct bpf_reg_state tmp; bool ret; if (can_skip_alu_sanitation(env, insn)) return 0; /* We already marked aux for masking from non-speculative * paths, thus we got here in the first place. We only care * to explore bad access from here. */ if (vstate->speculative) goto do_sim; alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; alu_state |= ptr_is_dst_reg ? BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) return 0; if (update_alu_sanitation_state(aux, alu_state, alu_limit)) return -EACCES; do_sim: /* Simulate and find potential out-of-bounds access under * speculative execution from truncation as a result of * masking when off was not within expected range. If off * sits in dst, then we temporarily need to move ptr there * to simulate dst (== 0) +/-= ptr. Needed, for example, * for cases where we use K-based arithmetic in one direction * and truncated reg-based in the other in order to explore * bad access. */ if (!ptr_is_dst_reg) { tmp = *dst_reg; *dst_reg = *ptr_reg; } ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); if (!ptr_is_dst_reg && ret) *dst_reg = tmp; return !ret ? -EFAULT : 0; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. */ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u32 dst = insn->dst_reg, src = insn->src_reg; u8 opcode = BPF_OP(insn->code); int ret; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. 
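 * For instance (hypothetical), a path guarded by the contradictory
 * test "r1 > 5 && r1 < 3" can leave smin_val > smax_val; rather than
 * reason from impossible bounds, the register is treated as unknown.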
*/ __mark_reg_unknown(env, dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (opcode == BPF_SUB && env->allow_ptr_leaks) { __mark_reg_unknown(env, dst_reg); return 0; } verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } switch (ptr_reg->type) { case PTR_TO_MAP_VALUE_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case PTR_TO_MAP_VALUE: if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", off_reg == dst_reg ? dst : src); return -EACCES; } fallthrough; default: break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; /* pointer types do not carry 32-bit bounds at the moment. */ __mark_reg32_unbounded(dst_reg); switch (opcode) { case BPF_ADD: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to add from different maps or paths\n", dst); return ret; } /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
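 * Concrete sketch (illustrative values): with r3 = pkt(id=n, off=8)
 * and r4 a scalar in [0, 64],
 *
 *	r2 = r3;
 *	r2 += r4;
 *
 * leaves r2 = pkt(id=m, off=8) with var_off spanning [0, 64], where
 * id=m is the fresh id mentioned above.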
*/
        if (signed_add_overflows(smin_ptr, smin_val) ||
            signed_add_overflows(smax_ptr, smax_val)) {
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
        } else {
            dst_reg->smin_value = smin_ptr + smin_val;
            dst_reg->smax_value = smax_ptr + smax_val;
        }
        if (umin_ptr + umin_val < umin_ptr ||
            umax_ptr + umax_val < umax_ptr) {
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
        } else {
            dst_reg->umin_value = umin_ptr + umin_val;
            dst_reg->umax_value = umax_ptr + umax_val;
        }
        dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        dst_reg->raw = ptr_reg->raw;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something was added to pkt_ptr, set range to zero */
            dst_reg->raw = 0;
        }
        break;
    case BPF_SUB:
        ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
        if (ret < 0) {
            verbose(env, "R%d tried to sub from different maps or paths\n", dst);
            return ret;
        }
        if (dst_reg == off_reg) {
            /* scalar -= pointer.  Creates an unknown scalar */
            verbose(env, "R%d tried to subtract pointer from scalar\n",
                dst);
            return -EACCES;
        }
        /* We don't allow subtraction from FP, because (according to
         * the test_verifier.c test "invalid fp arithmetic") JITs might not
         * be able to deal with it.
         */
        if (ptr_reg->type == PTR_TO_STACK) {
            verbose(env, "R%d subtraction from stack pointer prohibited\n",
                dst);
            return -EACCES;
        }
        if (known && (ptr_reg->off - smin_val ==
                  (s64)(s32)(ptr_reg->off - smin_val))) {
            /* pointer -= K.  Subtract it from fixed offset */
            dst_reg->smin_value = smin_ptr;
            dst_reg->smax_value = smax_ptr;
            dst_reg->umin_value = umin_ptr;
            dst_reg->umax_value = umax_ptr;
            dst_reg->var_off = ptr_reg->var_off;
            dst_reg->id = ptr_reg->id;
            dst_reg->off = ptr_reg->off - smin_val;
            dst_reg->raw = ptr_reg->raw;
            break;
        }
        /* A new variable offset is created.  If the subtrahend is known
         * nonnegative, then any reg->range we had before is still good.
         */
        if (signed_sub_overflows(smin_ptr, smax_val) ||
            signed_sub_overflows(smax_ptr, smin_val)) {
            /* Overflow possible, we know nothing */
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
        } else {
            dst_reg->smin_value = smin_ptr - smax_val;
            dst_reg->smax_value = smax_ptr - smin_val;
        }
        if (umin_ptr < umax_val) {
            /* Overflow possible, we know nothing */
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
        } else {
            /* Cannot overflow (as long as bounds are consistent) */
            dst_reg->umin_value = umin_ptr - umax_val;
            dst_reg->umax_value = umax_ptr - umin_val;
        }
        dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        dst_reg->raw = ptr_reg->raw;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something could have been added to pkt_ptr, set range to zero */
            if (smin_val < 0)
                dst_reg->raw = 0;
        }
        break;
    case BPF_AND:
    case BPF_OR:
    case BPF_XOR:
        /* bitwise ops on pointers are troublesome, prohibit. */
        verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
            dst, bpf_alu_string[opcode >> 4]);
        return -EACCES;
    default:
        /* other operators (e.g. MUL,LSH) produce non-pointer results */
        verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
            dst, bpf_alu_string[opcode >> 4]);
        return -EACCES;
    }

    if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
        return -EINVAL;

    __update_reg_bounds(dst_reg);
    __reg_deduce_bounds(dst_reg);
    __reg_bound_offset(dst_reg);

    /* For unprivileged we require that resulting offset must be in bounds
     * in order to be able to sanitize access later on.
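     * E.g. (hypothetical): with a 64-byte map value, an unprivileged
     * program may form value + 63 but not value + 64 even without a
     * dereference, since the later Spectre v1 masking needs an
     * in-bounds offset to clamp against.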
*/ if (!env->bypass_spec_v1) { if (dst_reg->type == PTR_TO_MAP_VALUE && check_map_access(env, dst, dst_reg->off, 1, false)) { verbose(env, "R%d pointer arithmetic of map value goes out of range, " "prohibited for !root\n", dst); return -EACCES; } else if (dst_reg->type == PTR_TO_STACK && check_stack_access(env, dst_reg, dst_reg->off + dst_reg->var_off.value, 1)) { verbose(env, "R%d stack pointer arithmetic goes out of range, " "prohibited for !root\n", dst); return -EACCES; } } return 0; } static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { s32 smin_val = src_reg->s32_min_value; s32 smax_val = src_reg->s32_max_value; u32 umin_val = src_reg->u32_min_value; u32 umax_val = src_reg->u32_max_value; if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { dst_reg->s32_min_value = S32_MIN; dst_reg->s32_max_value = S32_MAX; } else { dst_reg->s32_min_value += smin_val; dst_reg->s32_max_value += smax_val; } if (dst_reg->u32_min_value + umin_val < umin_val || dst_reg->u32_max_value + umax_val < umax_val) { dst_reg->u32_min_value = 0; dst_reg->u32_max_value = U32_MAX; } else { dst_reg->u32_min_value += umin_val; dst_reg->u32_max_value += umax_val; } } static void scalar_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { s64 smin_val = src_reg->smin_value; s64 smax_val = src_reg->smax_value; u64 umin_val = src_reg->umin_value; u64 umax_val = src_reg->umax_value; if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } } static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { s32 smin_val = src_reg->s32_min_value; s32 smax_val = src_reg->s32_max_value; u32 umin_val = src_reg->u32_min_value; u32 umax_val = src_reg->u32_max_value; if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->s32_min_value = S32_MIN; dst_reg->s32_max_value = S32_MAX; } else { dst_reg->s32_min_value -= smax_val; dst_reg->s32_max_value -= smin_val; } if (dst_reg->u32_min_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->u32_min_value = 0; dst_reg->u32_max_value = U32_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->u32_min_value -= umax_val; dst_reg->u32_max_value -= umin_val; } } static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { s64 smin_val = src_reg->smin_value; s64 smax_val = src_reg->smax_value; u64 umin_val = src_reg->umin_value; u64 umax_val = src_reg->umax_value; if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as 
bounds are consistent) */
        dst_reg->umin_value -= umax_val;
        dst_reg->umax_value -= umin_val;
    }
}

static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
                 struct bpf_reg_state *src_reg)
{
    s32 smin_val = src_reg->s32_min_value;
    u32 umin_val = src_reg->u32_min_value;
    u32 umax_val = src_reg->u32_max_value;

    if (smin_val < 0 || dst_reg->s32_min_value < 0) {
        /* Ain't nobody got time to multiply that sign */
        __mark_reg32_unbounded(dst_reg);
        return;
    }
    /* Both values are positive, so we can work with unsigned and
     * copy the result to signed (unless it exceeds S32_MAX).
     */
    if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
        /* Potential overflow, we know nothing */
        __mark_reg32_unbounded(dst_reg);
        return;
    }
    dst_reg->u32_min_value *= umin_val;
    dst_reg->u32_max_value *= umax_val;
    if (dst_reg->u32_max_value > S32_MAX) {
        /* Overflow possible, we know nothing */
        dst_reg->s32_min_value = S32_MIN;
        dst_reg->s32_max_value = S32_MAX;
    } else {
        dst_reg->s32_min_value = dst_reg->u32_min_value;
        dst_reg->s32_max_value = dst_reg->u32_max_value;
    }
}

static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
                   struct bpf_reg_state *src_reg)
{
    s64 smin_val = src_reg->smin_value;
    u64 umin_val = src_reg->umin_value;
    u64 umax_val = src_reg->umax_value;

    if (smin_val < 0 || dst_reg->smin_value < 0) {
        /* Ain't nobody got time to multiply that sign */
        __mark_reg64_unbounded(dst_reg);
        return;
    }
    /* Both values are positive, so we can work with unsigned and
     * copy the result to signed (unless it exceeds S64_MAX).
     */
    if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
        /* Potential overflow, we know nothing */
        __mark_reg64_unbounded(dst_reg);
        return;
    }
    dst_reg->umin_value *= umin_val;
    dst_reg->umax_value *= umax_val;
    if (dst_reg->umax_value > S64_MAX) {
        /* Overflow possible, we know nothing */
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
    } else {
        dst_reg->smin_value = dst_reg->umin_value;
        dst_reg->smax_value = dst_reg->umax_value;
    }
}

static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                 struct bpf_reg_state *src_reg)
{
    bool src_known = tnum_subreg_is_const(src_reg->var_off);
    bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
    struct tnum var32_off = tnum_subreg(dst_reg->var_off);
    s32 smin_val = src_reg->s32_min_value;
    u32 umax_val = src_reg->u32_max_value;

    /* Assuming scalar64_min_max_and will be called so it's safe
     * to skip updating register for known 32-bit case.
     */
    if (src_known && dst_known)
        return;

    /* We get our minimum from the var_off, since that's inherently
     * bitwise.  Our maximum is the minimum of the operands' maxima.
     */
    dst_reg->u32_min_value = var32_off.value;
    dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
    if (dst_reg->s32_min_value < 0 || smin_val < 0) {
        /* Lose signed bounds when ANDing negative numbers,
         * ain't nobody got time for that.
         */
        dst_reg->s32_min_value = S32_MIN;
        dst_reg->s32_max_value = S32_MAX;
    } else {
        /* ANDing two positives gives a positive, so safe to
         * cast result into s32.
         */
        dst_reg->s32_min_value = dst_reg->u32_min_value;
        dst_reg->s32_max_value = dst_reg->u32_max_value;
    }
}

static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
                   struct bpf_reg_state *src_reg)
{
    bool src_known = tnum_is_const(src_reg->var_off);
    bool dst_known = tnum_is_const(dst_reg->var_off);
    s64 smin_val = src_reg->smin_value;
    u64 umax_val = src_reg->umax_value;

    if (src_known && dst_known) {
        __mark_reg_known(dst_reg, dst_reg->var_off.value &
                      src_reg->var_off.value);
        return;
    }

    /* We get our minimum from the var_off, since that's inherently
     * bitwise.
     * Our maximum is the minimum of the operands' maxima.
     */
    dst_reg->umin_value = dst_reg->var_off.value;
    dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
    if (dst_reg->smin_value < 0 || smin_val < 0) {
        /* Lose signed bounds when ANDing negative numbers,
         * ain't nobody got time for that.
         */
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
    } else {
        /* ANDing two positives gives a positive, so safe to
         * cast result into s64.
         */
        dst_reg->smin_value = dst_reg->umin_value;
        dst_reg->smax_value = dst_reg->umax_value;
    }
    /* We may learn something more from the var_off */
    __update_reg_bounds(dst_reg);
}

static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
                struct bpf_reg_state *src_reg)
{
    bool src_known = tnum_subreg_is_const(src_reg->var_off);
    bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
    struct tnum var32_off = tnum_subreg(dst_reg->var_off);
    s32 smin_val = src_reg->s32_min_value;
    u32 umin_val = src_reg->u32_min_value;

    /* Assuming scalar64_min_max_or will be called so it is safe
     * to skip updating register for known case.
     */
    if (src_known && dst_known)
        return;

    /* We get our maximum from the var_off, and our minimum is the
     * maximum of the operands' minima
     */
    dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
    dst_reg->u32_max_value = var32_off.value | var32_off.mask;
    if (dst_reg->s32_min_value < 0 || smin_val < 0) {
        /* Lose signed bounds when ORing negative numbers,
         * ain't nobody got time for that.
         */
        dst_reg->s32_min_value = S32_MIN;
        dst_reg->s32_max_value = S32_MAX;
    } else {
        /* ORing two positives gives a positive, so safe to
         * cast result into s32.
         */
        dst_reg->s32_min_value = dst_reg->u32_min_value;
        dst_reg->s32_max_value = dst_reg->u32_max_value;
    }
}

static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
                  struct bpf_reg_state *src_reg)
{
    bool src_known = tnum_is_const(src_reg->var_off);
    bool dst_known = tnum_is_const(dst_reg->var_off);
    s64 smin_val = src_reg->smin_value;
    u64 umin_val = src_reg->umin_value;

    if (src_known && dst_known) {
        __mark_reg_known(dst_reg, dst_reg->var_off.value |
                      src_reg->var_off.value);
        return;
    }

    /* We get our maximum from the var_off, and our minimum is the
     * maximum of the operands' minima
     */
    dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
    dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
    if (dst_reg->smin_value < 0 || smin_val < 0) {
        /* Lose signed bounds when ORing negative numbers,
         * ain't nobody got time for that.
         */
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
    } else {
        /* ORing two positives gives a positive, so safe to
         * cast result into s64.
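         * (Illustrative: umin comes from max() of the operands' minima
         * above, while umax is derived from var_off as value | mask,
         * i.e. every unknown bit is assumed to be set.)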
*/ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); } static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, u64 umin_val, u64 umax_val) { /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->s32_min_value = S32_MIN; dst_reg->s32_max_value = S32_MAX; /* If we might shift our top bit out, then we know nothing */ if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { dst_reg->u32_min_value = 0; dst_reg->u32_max_value = U32_MAX; } else { dst_reg->u32_min_value <<= umin_val; dst_reg->u32_max_value <<= umax_val; } } static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { u32 umax_val = src_reg->u32_max_value; u32 umin_val = src_reg->u32_min_value; /* u32 alu operation will zext upper bits */ struct tnum subreg = tnum_subreg(dst_reg->var_off); __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); /* Not required but being careful mark reg64 bounds as unknown so * that we are forced to pick them up from tnum and zext later and * if some path skips this step we are still safe. */ __mark_reg64_unbounded(dst_reg); __update_reg32_bounds(dst_reg); } static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, u64 umin_val, u64 umax_val) { /* Special case <<32 because it is a common compiler pattern to sign * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are * positive we know this shift will also be positive so we can track * bounds correctly. Otherwise we lose all sign bit information except * what we can pick up from var_off. Perhaps we can generalize this * later to shifts of any length. */ if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; else dst_reg->smax_value = S64_MAX; if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; else dst_reg->smin_value = S64_MIN; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } } static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { u64 umax_val = src_reg->umax_value; u64 umin_val = src_reg->umin_value; /* scalar64 calc uses 32bit unshifted bounds so must be called first */ __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); } static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { struct tnum subreg = tnum_subreg(dst_reg->var_off); u32 umax_val = src_reg->u32_max_value; u32 umin_val = src_reg->u32_min_value; /* BPF_RSH is an unsigned shift. 
If the value in dst_reg might
     * be negative, then either:
     * 1) src_reg might be zero, so the sign bit of the result is
     *    unknown, so we lose our signed bounds
     * 2) it's known negative, thus the unsigned bounds capture the
     *    signed bounds
     * 3) the signed bounds cross zero, so they tell us nothing
     *    about the result
     * If the value in dst_reg is known nonnegative, then again the
     * unsigned bounds capture the signed bounds.
     * Thus, in all cases it suffices to blow away our signed bounds
     * and rely on inferring new ones from the unsigned bounds and
     * var_off of the result.
     */
    dst_reg->s32_min_value = S32_MIN;
    dst_reg->s32_max_value = S32_MAX;
    dst_reg->var_off = tnum_rshift(subreg, umin_val);
    dst_reg->u32_min_value >>= umax_val;
    dst_reg->u32_max_value >>= umin_val;

    __mark_reg64_unbounded(dst_reg);
    __update_reg32_bounds(dst_reg);
}

static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
                   struct bpf_reg_state *src_reg)
{
    u64 umax_val = src_reg->umax_value;
    u64 umin_val = src_reg->umin_value;

    /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
     * be negative, then either:
     * 1) src_reg might be zero, so the sign bit of the result is
     *    unknown, so we lose our signed bounds
     * 2) it's known negative, thus the unsigned bounds capture the
     *    signed bounds
     * 3) the signed bounds cross zero, so they tell us nothing
     *    about the result
     * If the value in dst_reg is known nonnegative, then again the
     * unsigned bounds capture the signed bounds.
     * Thus, in all cases it suffices to blow away our signed bounds
     * and rely on inferring new ones from the unsigned bounds and
     * var_off of the result.
     */
    dst_reg->smin_value = S64_MIN;
    dst_reg->smax_value = S64_MAX;
    dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
    dst_reg->umin_value >>= umax_val;
    dst_reg->umax_value >>= umin_val;

    /* It's not easy to operate on alu32 bounds here because it depends
     * on bits being shifted in.  Take the easy way out and mark unbounded
     * so we can recalculate later from tnum.
     */
    __mark_reg32_unbounded(dst_reg);
    __update_reg_bounds(dst_reg);
}

static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
                  struct bpf_reg_state *src_reg)
{
    u64 umin_val = src_reg->u32_min_value;

    /* Upon reaching here, src_known is true and
     * umax_val is equal to umin_val.
     */
    dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
    dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);

    dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);

    /* blow away the dst_reg umin_value/umax_value and rely on
     * dst_reg var_off to refine the result.
     */
    dst_reg->u32_min_value = 0;
    dst_reg->u32_max_value = U32_MAX;

    __mark_reg64_unbounded(dst_reg);
    __update_reg32_bounds(dst_reg);
}

static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
                struct bpf_reg_state *src_reg)
{
    u64 umin_val = src_reg->umin_value;

    /* Upon reaching here, src_known is true and umax_val is equal
     * to umin_val.
     */
    dst_reg->smin_value >>= umin_val;
    dst_reg->smax_value >>= umin_val;

    dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);

    /* blow away the dst_reg umin_value/umax_value and rely on
     * dst_reg var_off to refine the result.
     */
    dst_reg->umin_value = 0;
    dst_reg->umax_value = U64_MAX;

    /* It's not easy to operate on alu32 bounds here because it depends
     * on bits being shifted in from the upper 32 bits.  Take the easy way
     * out and mark unbounded so we can recalculate later from tnum.
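     * (Example, hypothetical values: an arithmetic right shift by one
     * of a value in [-8, 8] gives [-4, 4], which the signed-bound
     * shifts above capture; the unsigned bounds are wiped because sign
     * extension makes them unreliable.)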
*/
    __mark_reg32_unbounded(dst_reg);
    __update_reg_bounds(dst_reg);
}

/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                      struct bpf_insn *insn,
                      struct bpf_reg_state *dst_reg,
                      struct bpf_reg_state src_reg)
{
    struct bpf_reg_state *regs = cur_regs(env);
    u8 opcode = BPF_OP(insn->code);
    bool src_known;
    s64 smin_val, smax_val;
    u64 umin_val, umax_val;
    s32 s32_min_val, s32_max_val;
    u32 u32_min_val, u32_max_val;
    u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
    u32 dst = insn->dst_reg;
    int ret;
    bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);

    smin_val = src_reg.smin_value;
    smax_val = src_reg.smax_value;
    umin_val = src_reg.umin_value;
    umax_val = src_reg.umax_value;

    s32_min_val = src_reg.s32_min_value;
    s32_max_val = src_reg.s32_max_value;
    u32_min_val = src_reg.u32_min_value;
    u32_max_val = src_reg.u32_max_value;

    if (alu32) {
        src_known = tnum_subreg_is_const(src_reg.var_off);
        if ((src_known &&
             (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
            s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
            /* Taint dst register if offset had invalid bounds
             * derived from e.g. dead branches.
             */
            __mark_reg_unknown(env, dst_reg);
            return 0;
        }
    } else {
        src_known = tnum_is_const(src_reg.var_off);
        if ((src_known &&
             (smin_val != smax_val || umin_val != umax_val)) ||
            smin_val > smax_val || umin_val > umax_val) {
            /* Taint dst register if offset had invalid bounds
             * derived from e.g. dead branches.
             */
            __mark_reg_unknown(env, dst_reg);
            return 0;
        }
    }

    if (!src_known &&
        opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
        __mark_reg_unknown(env, dst_reg);
        return 0;
    }

    /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
     * There are two classes of instructions: The first class we track both
     * alu32 and alu64 sign/unsigned bounds independently; this provides the
     * greatest amount of precision when alu operations are mixed with jmp32
     * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
     * and BPF_OR. This is possible because these ops have fairly easy to
     * understand and calculate behavior in both 32-bit and 64-bit alu ops.
     * See alu32 verifier tests for examples. The second class of
     * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
     * with regards to tracking sign/unsigned bounds because the bits may
     * cross subreg boundaries in the alu64 case. When this happens we mark
     * the reg unbounded in the subreg bound space and use the resulting
     * tnum to calculate an approximation of the sign/unsigned bounds.
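     * A short illustration (hypothetical): a 64-bit BPF_LSH by 40
     * moves bits across the subreg boundary, so the 32-bit bounds
     * cannot be updated directly; they are marked unbounded and later
     * re-derived from the shifted tnum.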
*/ switch (opcode) { case BPF_ADD: ret = sanitize_val_alu(env, insn); if (ret < 0) { verbose(env, "R%d tried to add from different pointers or scalars\n", dst); return ret; } scalar32_min_max_add(dst_reg, &src_reg); scalar_min_max_add(dst_reg, &src_reg); dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: ret = sanitize_val_alu(env, insn); if (ret < 0) { verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); return ret; } scalar32_min_max_sub(dst_reg, &src_reg); scalar_min_max_sub(dst_reg, &src_reg); dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); scalar32_min_max_mul(dst_reg, &src_reg); scalar_min_max_mul(dst_reg, &src_reg); break; case BPF_AND: dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); scalar32_min_max_and(dst_reg, &src_reg); scalar_min_max_and(dst_reg, &src_reg); break; case BPF_OR: dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); scalar32_min_max_or(dst_reg, &src_reg); scalar_min_max_or(dst_reg, &src_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } if (alu32) scalar32_min_max_lsh(dst_reg, &src_reg); else scalar_min_max_lsh(dst_reg, &src_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } if (alu32) scalar32_min_max_rsh(dst_reg, &src_reg); else scalar_min_max_rsh(dst_reg, &src_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } if (alu32) scalar32_min_max_arsh(dst_reg, &src_reg); else scalar_min_max_arsh(dst_reg, &src_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } /* ALU32 ops are zero extended into 64bit register */ if (alu32) zext_32_to_64(dst_reg); __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. */ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int err; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. 
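 * (E.g., hypothetically, map_value_ptr1 - map_value_ptr2 would expose
 * the distance between two kernel addresses.)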
Disallow all math except * pointer subtraction */ if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ err = mark_chain_precision(env, insn->dst_reg); if (err) return err; return adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); } } else if (ptr_reg) { /* pointer += scalar */ err = mark_chain_precision(env, insn->src_reg); if (err) return err; return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. */ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) /* pointer += K */ return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand, mark as required later */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { struct bpf_reg_state *src_reg = regs + insn->src_reg; struct bpf_reg_state *dst_reg = regs + insn->dst_reg; if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; dst_reg->subreg_def = DEF_NOT_SUBREG; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } else if (src_reg->type == SCALAR_VALUE) { *dst_reg = *src_reg; dst_reg->live |= REG_LIVE_WRITTEN; dst_reg->subreg_def = env->insn_idx + 1; } else 
{ mark_reg_unknown(env, regs, insn->dst_reg); } zext_32_to_64(dst_reg); } } else { /* case: R = imm * remember the value we stored into this reg */ /* clear any state __mark_reg_known doesn't set */ mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void __find_good_pkt_pointers(struct bpf_func_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, u16 new_range) { struct bpf_reg_state *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; if (reg->type == type && reg->id == dst_reg->id) /* keep the maximum range already checked */ reg->range = max(reg->range, new_range); } bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. 
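 * (Put differently: with a hypothetical umax_value of 1ULL << 63, an
 * unsigned comparison against pkt_end can succeed purely through
 * wraparound, so no usable range can be derived from it.)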
*/ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i <= vstate->curframe; i++) __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, new_range); } static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) { struct tnum subreg = tnum_subreg(reg->var_off); s32 sval = (s32)val; switch (opcode) { case BPF_JEQ: if (tnum_is_const(subreg)) return !!tnum_equals_const(subreg, val); break; case BPF_JNE: if (tnum_is_const(subreg)) return !tnum_equals_const(subreg, val); break; case BPF_JSET: if ((~subreg.mask & subreg.value) & val) return 1; if (!((subreg.mask | subreg.value) & val)) return 0; break; case BPF_JGT: if (reg->u32_min_value > val) return 1; else if (reg->u32_max_value <= val) return 0; break; case BPF_JSGT: if (reg->s32_min_value > sval) return 1; else if (reg->s32_max_value < sval) return 0; break; case BPF_JLT: if (reg->u32_max_value < val) return 1; else if (reg->u32_min_value >= val) return 0; break; case BPF_JSLT: if (reg->s32_max_value < sval) return 1; else if (reg->s32_min_value >= sval) return 0; break; case BPF_JGE: if (reg->u32_min_value >= val) return 1; else if (reg->u32_max_value < val) return 0; break; case BPF_JSGE: if (reg->s32_min_value >= sval) return 1; else if (reg->s32_max_value < sval) return 0; break; case BPF_JLE: if (reg->u32_max_value <= val) return 1; else if (reg->u32_min_value > val) return 0; break; case BPF_JSLE: if (reg->s32_max_value <= sval) return 1; else if (reg->s32_min_value > sval) return 0; break; } return -1; } static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) { s64 sval = (s64)val; switch (opcode) { case BPF_JEQ: if (tnum_is_const(reg->var_off)) return !!tnum_equals_const(reg->var_off, val); break; case BPF_JNE: if (tnum_is_const(reg->var_off)) return !tnum_equals_const(reg->var_off, val); break; case BPF_JSET: if ((~reg->var_off.mask & reg->var_off.value) & val) return 1; if (!((reg->var_off.mask | reg->var_off.value) & val)) return 0; break; case BPF_JGT: if (reg->umin_value > val) return 1; else if (reg->umax_value <= val) return 0; break; case BPF_JSGT: if (reg->smin_value > sval) return 1; else if (reg->smax_value < sval) return 0; break; case BPF_JLT: if (reg->umax_value < val) return 1; else if (reg->umin_value >= val) return 0; break; case BPF_JSLT: if (reg->smax_value < sval) return 1; else if (reg->smin_value >= sval) return 0; break; case BPF_JGE: if (reg->umin_value >= val) return 1; else 
if (reg->umax_value < val)
            return 0;
        break;
    case BPF_JSGE:
        if (reg->smin_value >= sval)
            return 1;
        else if (reg->smax_value < sval)
            return 0;
        break;
    case BPF_JLE:
        if (reg->umax_value <= val)
            return 1;
        else if (reg->umin_value > val)
            return 0;
        break;
    case BPF_JSLE:
        if (reg->smax_value <= sval)
            return 1;
        else if (reg->smin_value > sval)
            return 0;
        break;
    }

    return -1;
}

/* compute branch direction of the expression "if (reg opcode val) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
 *      value range is [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
               bool is_jmp32)
{
    if (__is_pointer_value(false, reg)) {
        if (!reg_type_not_null(reg->type))
            return -1;

        /* If pointer is valid tests against zero will fail so we can
         * use this to direct branch taken.
         */
        if (val != 0)
            return -1;

        switch (opcode) {
        case BPF_JEQ:
            return 0;
        case BPF_JNE:
            return 1;
        default:
            return -1;
        }
    }

    if (is_jmp32)
        return is_branch32_taken(reg, val, opcode);
    return is_branch64_taken(reg, val, opcode);
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
                struct bpf_reg_state *false_reg,
                u64 val, u32 val32,
                u8 opcode, bool is_jmp32)
{
    struct tnum false_32off = tnum_subreg(false_reg->var_off);
    struct tnum false_64off = false_reg->var_off;
    struct tnum true_32off = tnum_subreg(true_reg->var_off);
    struct tnum true_64off = true_reg->var_off;
    s64 sval = (s64)val;
    s32 sval32 = (s32)val32;

    /* If the dst_reg is a pointer, we can't learn anything about its
     * variable offset from the compare (unless src_reg were a pointer into
     * the same object, but we don't bother with that).
     * Since false_reg and true_reg have the same type by construction, we
     * only need to check one of them for pointerness.
     */
    if (__is_pointer_value(false, false_reg))
        return;

    switch (opcode) {
    case BPF_JEQ:
    case BPF_JNE:
    {
        struct bpf_reg_state *reg =
            opcode == BPF_JEQ ? true_reg : false_reg;

        /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
         * if it is true we know the value for sure. Likewise for
         * BPF_JNE.
         */
        if (is_jmp32)
            __mark_reg32_known(reg, val32);
        else
            __mark_reg_known(reg, val);
        break;
    }
    case BPF_JSET:
        if (is_jmp32) {
            false_32off = tnum_and(false_32off, tnum_const(~val32));
            if (is_power_of_2(val32))
                true_32off = tnum_or(true_32off,
                             tnum_const(val32));
        } else {
            false_64off = tnum_and(false_64off, tnum_const(~val));
            if (is_power_of_2(val))
                true_64off = tnum_or(true_64off,
                             tnum_const(val));
        }
        break;
    case BPF_JGE:
    case BPF_JGT:
    {
        if (is_jmp32) {
            u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
            u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;

            false_reg->u32_max_value = min(false_reg->u32_max_value,
                               false_umax);
            true_reg->u32_min_value = max(true_reg->u32_min_value,
                              true_umin);
        } else {
            u64 false_umax = opcode == BPF_JGT ? val : val - 1;
            u64 true_umin = opcode == BPF_JGT ? val + 1 : val;

            false_reg->umax_value = min(false_reg->umax_value, false_umax);
            true_reg->umin_value = max(true_reg->umin_value, true_umin);
        }
        break;
    }
    case BPF_JSGE:
    case BPF_JSGT:
    {
        if (is_jmp32) {
            s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
            s32 true_smin = opcode == BPF_JSGT ?
sval32 + 1 : sval32; false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); } else { s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; false_reg->smax_value = min(false_reg->smax_value, false_smax); true_reg->smin_value = max(true_reg->smin_value, true_smin); } break; } case BPF_JLE: case BPF_JLT: { if (is_jmp32) { u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; false_reg->u32_min_value = max(false_reg->u32_min_value, false_umin); true_reg->u32_max_value = min(true_reg->u32_max_value, true_umax); } else { u64 false_umin = opcode == BPF_JLT ? val : val + 1; u64 true_umax = opcode == BPF_JLT ? val - 1 : val; false_reg->umin_value = max(false_reg->umin_value, false_umin); true_reg->umax_value = min(true_reg->umax_value, true_umax); } break; } case BPF_JSLE: case BPF_JSLT: { if (is_jmp32) { s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); } else { s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; false_reg->smin_value = max(false_reg->smin_value, false_smin); true_reg->smax_value = min(true_reg->smax_value, true_smax); } break; } default: return; } if (is_jmp32) { false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), tnum_subreg(false_32off)); true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), tnum_subreg(true_32off)); __reg_combine_32_into_64(false_reg); __reg_combine_32_into_64(true_reg); } else { false_reg->var_off = false_64off; true_reg->var_off = true_64off; __reg_combine_64_into_32(false_reg); __reg_combine_64_into_32(true_reg); } } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u32 val32, u8 opcode, bool is_jmp32) { /* How can we transform "a <op> b" into "b <op> a"? */ static const u8 opcode_flip[16] = { /* these stay the same */ [BPF_JEQ >> 4] = BPF_JEQ, [BPF_JNE >> 4] = BPF_JNE, [BPF_JSET >> 4] = BPF_JSET, /* these swap "lesser" and "greater" (L and G in the opcodes) */ [BPF_JGE >> 4] = BPF_JLE, [BPF_JGT >> 4] = BPF_JLT, [BPF_JLE >> 4] = BPF_JGE, [BPF_JLT >> 4] = BPF_JGT, [BPF_JSGE >> 4] = BPF_JSLE, [BPF_JSGT >> 4] = BPF_JSLT, [BPF_JSLE >> 4] = BPF_JSGE, [BPF_JSLT >> 4] = BPF_JSGT }; opcode = opcode_flip[opcode >> 4]; /* This uses zero as "not present in table"; luckily the zero opcode, * BPF_JA, can't get here. 
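 * (Sketch: "if (5 > r1)" is handled by flipping BPF_JGT to BPF_JLT and
 * swapping the operands, so the single reg_set_min_max()
 * implementation covers both operand orders.)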
*/ if (opcode) reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null) { if (reg_type_may_be_null(reg->type) && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { const struct bpf_map *map = reg->map_ptr; if (map->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = map->inner_map_meta; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || map->map_type == BPF_MAP_TYPE_SOCKHASH) { reg->type = PTR_TO_SOCKET; } else { reg->type = PTR_TO_MAP_VALUE; } } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { reg->type = PTR_TO_SOCKET; } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { reg->type = PTR_TO_SOCK_COMMON; } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { reg->type = PTR_TO_TCP_SOCK; } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { reg->type = PTR_TO_BTF_ID; } else if (reg->type == PTR_TO_MEM_OR_NULL) { reg->type = PTR_TO_MEM; } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { reg->type = PTR_TO_RDONLY_BUF; } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { reg->type = PTR_TO_RDWR_BUF; } if (is_null) { /* We don't need id and ref_obj_id from this point * onwards anymore, thus we should better reset it, * so that state pruning has chances to take effect. */ reg->id = 0; reg->ref_obj_id = 0; } else if (!reg_may_point_to_spin_lock(reg)) { /* For not-NULL ptr, reg->ref_obj_id will be reset * in release_reg_references(). * * reg->id is still used by spin_lock ptr. 
Other * than spin_lock ptr type, reg->id can be reset. */ reg->id = 0; } } } static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, bool is_null) { struct bpf_reg_state *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; mark_ptr_or_null_reg(state, reg, id, is_null); } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; u32 ref_obj_id = regs[regno].ref_obj_id; u32 id = regs[regno].id; int i; if (ref_obj_id && ref_obj_id == id && is_null) /* regs[regno] is in the " == NULL" branch. * No one could have freed the reference state before * doing the NULL check. */ WARN_ON_ONCE(release_reference_state(state, id)); for (i = 0; i <= vstate->curframe; i++) __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; /* Pointers are always 64-bit. */ if (BPF_CLASS(insn->code) == BPF_JMP32) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; u8 opcode = BPF_OP(insn->code); bool is_jmp32; int pred = -1; int err; /* Only conditional jumps are expected to reach here. */ if (opcode == BPF_JA || opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } src_reg = &regs[insn->src_reg]; } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; if (BPF_SRC(insn->code) == BPF_K) { pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); } else if (src_reg->type == SCALAR_VALUE && is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { pred = is_branch_taken(dst_reg, tnum_subreg(src_reg->var_off).value, opcode, is_jmp32); } else if (src_reg->type == SCALAR_VALUE && !is_jmp32 && tnum_is_const(src_reg->var_off)) { pred = is_branch_taken(dst_reg, src_reg->var_off.value, opcode, is_jmp32); } if (pred >= 0) { /* If we get here with a dst_reg pointer type it is because * above is_branch_taken() special cased the 0 comparison. */ if (!__is_pointer_value(false, dst_reg)) err = mark_chain_precision(env, insn->dst_reg); if (BPF_SRC(insn->code) == BPF_X && !err) err = mark_chain_precision(env, insn->src_reg); if (err) return err; } if (pred == 1) { /* only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else if (pred == 0) { /* only follow fall-through branch, since * that's where the program will go */ return 0; } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
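	 *
	 * Illustrative example (hypothetical program): for
	 *   if r1 > 10 goto pc+4
	 * with r1 a scalar known to be in [0, 100], the pushed jump branch
	 * gets r1 in [11, 100] while the fall-through keeps r1 in [0, 10];
	 * the reg_set_min_max() call below derives these per-branch bounds.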
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		struct bpf_reg_state *src_reg = &regs[insn->src_reg];

		if (dst_reg->type == SCALAR_VALUE &&
		    src_reg->type == SCALAR_VALUE) {
			if (tnum_is_const(src_reg->var_off) ||
			    (is_jmp32 &&
			     tnum_is_const(tnum_subreg(src_reg->var_off))))
				reg_set_min_max(&other_branch_regs[insn->dst_reg],
						dst_reg,
						src_reg->var_off.value,
						tnum_subreg(src_reg->var_off).value,
						opcode, is_jmp32);
			else if (tnum_is_const(dst_reg->var_off) ||
				 (is_jmp32 &&
				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
						    src_reg,
						    dst_reg->var_off.value,
						    tnum_subreg(dst_reg->var_off).value,
						    opcode, is_jmp32);
			else if (!is_jmp32 &&
				 (opcode == BPF_JEQ || opcode == BPF_JNE))
				/* Comparing for equality, we can combine knowledge */
				reg_combine_min_max(&other_branch_regs[insn->src_reg],
						    &other_branch_regs[insn->dst_reg],
						    src_reg, dst_reg, opcode);
		}
	} else if (dst_reg->type == SCALAR_VALUE) {
		reg_set_min_max(&other_branch_regs[insn->dst_reg],
				dst_reg, insn->imm, (u32)insn->imm,
				opcode, is_jmp32);
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
	 * NOTE: these optimizations below are related to pointer comparison
	 *       which will never be JMP32.
	 */
	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    reg_type_may_be_null(dst_reg->type)) {
		/* Mark all identical registers in each branch as either
		 * safe or unknown depending on the R == 0 or R != 0 conditional.
		 */
		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
				      opcode == BPF_JNE);
		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
				      opcode == BPF_JEQ);
	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
					   this_branch, other_branch) &&
		   is_pointer_value(env, insn->dst_reg)) {
		verbose(env, "R%d pointer comparison prohibited\n",
			insn->dst_reg);
		return -EACCES;
	}
	if (env->log.level & BPF_LOG_LEVEL)
		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
	return 0;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose(env, "invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(env, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		regs[insn->dst_reg].type = SCALAR_VALUE;
		__mark_reg_known(&regs[insn->dst_reg], imm);
		return 0;
	}

	map = env->used_maps[aux->map_index];
	mark_reg_known_zero(env, regs, insn->dst_reg);
	regs[insn->dst_reg].map_ptr = map;

	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
		regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
		regs[insn->dst_reg].off = aux->map_off;
		if (map_value_has_spin_lock(map))
			regs[insn->dst_reg].id = ++env->id_gen;
	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
		regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	} else {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	static const int ctx_reg = BPF_REG_6;
	u8 mode = BPF_MODE(insn->code);
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (!env->ops->gen_ld_abs) {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	if (env->subprog_cnt > 1) {
		/* when program has LD_ABS insn JITs and interpreter assume
		 * that r1 == ctx == skb which is not the case for callees
		 * that can have arbitrary arguments. It's problematic
		 * for main prog as well since JITs would need to analyze
		 * all functions in order to make proper register save/restore
		 * decisions in the main prog. Hence disallow LD_ABS with calls
		 */
		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, ctx_reg, SRC_OP);
	if (err)
		return err;

	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
	 * gen_ld_abs() may terminate the program at runtime, leading to
	 * reference leak.
	 */
	err = check_reference_leak(env);
	if (err) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
		return err;
	}

	if (env->cur_state->active_spin_lock) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
		return -EINVAL;
	}

	if (regs[ctx_reg].type != PTR_TO_CTX) {
		verbose(env,
			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
	if (err < 0)
		return err;

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet.
	 * Already marked as written above.
	 */
	mark_reg_unknown(env, regs, BPF_REG_0);
	/* ld_abs load up to 32-bit skb data. */
	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
	return 0;
}

static int check_return_code(struct bpf_verifier_env *env)
{
	struct tnum enforce_attach_type_range = tnum_unknown;
	const struct bpf_prog *prog = env->prog;
	struct bpf_reg_state *reg;
	struct tnum range = tnum_range(0, 1);
	int err;

	/* LSM and struct_ops func-ptr's return type could be "void" */
	if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
	     env->prog->type == BPF_PROG_TYPE_LSM) &&
	    !prog->aux->attach_func_proto->type)
		return 0;

	/* eBPF calling convention is such that R0 is used
	 * to return the value from eBPF program.
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) range = tnum_range(1, 1); break; case BPF_PROG_TYPE_CGROUP_SKB: if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { range = tnum_range(0, 3); enforce_attach_type_range = tnum_range(2, 3); } break; case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: break; case BPF_PROG_TYPE_RAW_TRACEPOINT: if (!env->prog->aux->attach_btf_id) return 0; range = tnum_const(0); break; case BPF_PROG_TYPE_TRACING: switch (env->prog->expected_attach_type) { case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: range = tnum_const(0); break; case BPF_TRACE_RAW_TP: case BPF_MODIFY_RETURN: return 0; case BPF_TRACE_ITER: break; default: return -ENOTSUPP; } break; case BPF_PROG_TYPE_SK_LOOKUP: range = tnum_range(SK_DROP, SK_PASS); break; case BPF_PROG_TYPE_EXT: /* freplace program can return anything as its return value * depends on the to-be-replaced kernel func or bpf program. */ default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { char tn_buf[48]; verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } tnum_strn(tn_buf, sizeof(tn_buf), range); verbose(env, " should have been in %s\n", tn_buf); return -EINVAL; } if (!tnum_is_unknown(enforce_attach_type_range) && tnum_in(enforce_attach_type_range, reg->var_off)) env->prog->enforce_expected_attach_type = 1; return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; static u32 state_htab_size(struct bpf_verifier_env *env) { return env->prog->len; } static struct bpf_verifier_state_list 
**explored_state( struct bpf_verifier_env *env, int idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_func_state *state = cur->frame[cur->curframe]; return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; } static void init_explored_state(struct bpf_verifier_env *env, int idx) { env->insn_aux_data[idx].prune_point = true; } /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, bool loop_ok) { int *insn_stack = env->cfg.insn_stack; int *insn_state = env->cfg.insn_state; if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose_linfo(env, t, "%d: ", t); verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ init_explored_state(env, w); if (insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (env->cfg.cur_stack >= env->prog->len) return -E2BIG; insn_stack[env->cfg.cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { if (loop_ok && env->bpf_capable) return 0; verbose_linfo(env, t, "%d: ", t); verbose_linfo(env, w, "%d: ", w); verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int *insn_stack, *insn_state; int ret = 0; int i, t; insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kvfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ env->cfg.cur_stack = 1; peek_stack: if (env->cfg.cur_stack == 0) goto check_state; t = insn_stack[env->cfg.cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP || BPF_CLASS(insns[t].code) == BPF_JMP32) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env, false); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) init_explored_state(env, t + 1); if (insns[t].src_reg == BPF_PSEUDO_CALL) { init_explored_state(env, t); ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, false); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, true); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* unconditional jmp is not a good pruning point, * but it's marked, since backtracking needs * to record jmp history in is_state_visited(). 
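			 *
			 * (Illustrative: for a 'goto +3' at insn 10, the
			 * call below marks insn 14, the jump target, as a
			 * candidate point for state comparison.)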
*/ init_explored_state(env, t + insns[t].off + 1); /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) init_explored_state(env, t + 1); } else { /* conditional jump with two edges */ init_explored_state(env, t); ret = push_insn(t, t + 1, FALLTHROUGH, env, true); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env, false); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (env->cfg.cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kvfree(insn_state); kvfree(insn_stack); env->cfg.insn_state = env->cfg.insn_stack = NULL; return ret; } /* The minimum supported BTF func info size */ #define MIN_BPF_FUNCINFO_SIZE 8 #define MAX_FUNCINFO_REC_SIZE 252 static int check_btf_func(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, nfuncs, urec_size, min_size; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; struct bpf_func_info_aux *info_aux = NULL; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; void __user *urecord; u32 prev_offset = 0; int ret = -ENOMEM; nfuncs = attr->func_info_cnt; if (!nfuncs) return 0; if (nfuncs != env->subprog_cnt) { verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); return -EINVAL; } urec_size = attr->func_info_rec_size; if (urec_size < MIN_BPF_FUNCINFO_SIZE || urec_size > MAX_FUNCINFO_REC_SIZE || urec_size % sizeof(u32)) { verbose(env, "invalid func info rec size %u\n", urec_size); return -EINVAL; } prog = env->prog; btf = prog->aux->btf; urecord = u64_to_user_ptr(attr->func_info); min_size = min_t(u32, krec_size, urec_size); krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); if (!krecord) return -ENOMEM; info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); if (!info_aux) goto err_free; for (i = 0; i < nfuncs; i++) { ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); if (ret) { if (ret == -E2BIG) { verbose(env, "nonzero tailing record in func info"); /* set the size kernel expects so loader can zero * out the rest of the record. 
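				 *
				 * (Hypothetical sizes: if the kernel record
				 * is 8 bytes and userspace passed
				 * urec_size == 16, bytes 8..15 must be zero
				 * and only min_size == 8 bytes are copied.)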
*/ if (put_user(min_size, &uattr->func_info_rec_size)) ret = -EFAULT; } goto err_free; } if (copy_from_user(&krecord[i], urecord, min_size)) { ret = -EFAULT; goto err_free; } /* check insn_off */ if (i == 0) { if (krecord[i].insn_off) { verbose(env, "nonzero insn_off %u for the first func info record", krecord[i].insn_off); ret = -EINVAL; goto err_free; } } else if (krecord[i].insn_off <= prev_offset) { verbose(env, "same or smaller insn offset (%u) than previous func info record (%u)", krecord[i].insn_off, prev_offset); ret = -EINVAL; goto err_free; } if (env->subprog_info[i].start != krecord[i].insn_off) { verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); ret = -EINVAL; goto err_free; } /* check type_id */ type = btf_type_by_id(btf, krecord[i].type_id); if (!type || !btf_type_is_func(type)) { verbose(env, "invalid type id %d in func info", krecord[i].type_id); ret = -EINVAL; goto err_free; } info_aux[i].linkage = BTF_INFO_VLEN(type->info); prev_offset = krecord[i].insn_off; urecord += urec_size; } prog->aux->func_info = krecord; prog->aux->func_info_cnt = nfuncs; prog->aux->func_info_aux = info_aux; return 0; err_free: kvfree(krecord); kfree(info_aux); return ret; } static void adjust_btf_func(struct bpf_verifier_env *env) { struct bpf_prog_aux *aux = env->prog->aux; int i; if (!aux->func_info) return; for (i = 0; i < env->subprog_cnt; i++) aux->func_info[i].insn_off = env->subprog_info[i].start; } #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ sizeof(((struct bpf_line_info *)(0))->line_col)) #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE static int check_btf_line(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; struct bpf_subprog_info *sub; struct bpf_line_info *linfo; struct bpf_prog *prog; const struct btf *btf; void __user *ulinfo; int err; nr_linfo = attr->line_info_cnt; if (!nr_linfo) return 0; rec_size = attr->line_info_rec_size; if (rec_size < MIN_BPF_LINEINFO_SIZE || rec_size > MAX_LINEINFO_REC_SIZE || rec_size & (sizeof(u32) - 1)) return -EINVAL; /* Need to zero it in case the userspace may * pass in a smaller bpf_line_info object. */ linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), GFP_KERNEL | __GFP_NOWARN); if (!linfo) return -ENOMEM; prog = env->prog; btf = prog->aux->btf; s = 0; sub = env->subprog_info; ulinfo = u64_to_user_ptr(attr->line_info); expected_size = sizeof(struct bpf_line_info); ncopy = min_t(u32, expected_size, rec_size); for (i = 0; i < nr_linfo; i++) { err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); if (err) { if (err == -E2BIG) { verbose(env, "nonzero tailing record in line_info"); if (put_user(expected_size, &uattr->line_info_rec_size)) err = -EFAULT; } goto err_free; } if (copy_from_user(&linfo[i], ulinfo, ncopy)) { err = -EFAULT; goto err_free; } /* * Check insn_off to ensure * 1) strictly increasing AND * 2) bounded by prog->len * * The linfo[0].insn_off == 0 check logically falls into * the later "missing bpf_line_info for func..." case * because the first linfo[0].insn_off must be the * first sub also and the first sub must have * subprog_info[0].start == 0. 
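		 *
		 * For illustration (hypothetical numbers): with
		 * prog->len == 8, an insn_off sequence {0, 2, 5} passes,
		 * while {0, 2, 2} (not strictly increasing) or {0, 9}
		 * (not bounded by prog->len) is rejected below.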
*/ if ((i && linfo[i].insn_off <= prev_offset) || linfo[i].insn_off >= prog->len) { verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", i, linfo[i].insn_off, prev_offset, prog->len); err = -EINVAL; goto err_free; } if (!prog->insnsi[linfo[i].insn_off].code) { verbose(env, "Invalid insn code at line_info[%u].insn_off\n", i); err = -EINVAL; goto err_free; } if (!btf_name_by_offset(btf, linfo[i].line_off) || !btf_name_by_offset(btf, linfo[i].file_name_off)) { verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); err = -EINVAL; goto err_free; } if (s != env->subprog_cnt) { if (linfo[i].insn_off == sub[s].start) { sub[s].linfo_idx = i; s++; } else if (sub[s].start < linfo[i].insn_off) { verbose(env, "missing bpf_line_info for func#%u\n", s); err = -EINVAL; goto err_free; } } prev_offset = linfo[i].insn_off; ulinfo += rec_size; } if (s != env->subprog_cnt) { verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", env->subprog_cnt - s, s); err = -EINVAL; goto err_free; } prog->aux->linfo = linfo; prog->aux->nr_linfo = nr_linfo; return 0; err_free: kvfree(linfo); return err; } static int check_btf_info(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { struct btf *btf; int err; if (!attr->func_info_cnt && !attr->line_info_cnt) return 0; btf = btf_get_by_fd(attr->prog_btf_fd); if (IS_ERR(btf)) return PTR_ERR(btf); env->prog->aux->btf = btf; err = check_btf_func(env, attr, uattr); if (err) return err; err = check_btf_line(env, attr, uattr); if (err) return err; return 0; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. 
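 *
 * Illustrative example: once (old=5 -> cur=9) is recorded, a later pair
 * (old=5, cur=7) fails the check, while (old=3, cur=9) simply records a
 * new mapping -- the same cur id may serve several distinct old ids.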
 */
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
	unsigned int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}

static void clean_func_state(struct bpf_verifier_env *env,
			     struct bpf_func_state *st)
{
	enum bpf_reg_liveness live;
	int i, j;

	for (i = 0; i < BPF_REG_FP; i++) {
		live = st->regs[i].live;
		/* liveness must not touch this register anymore */
		st->regs[i].live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			__mark_reg_not_init(env, &st->regs[i]);
	}

	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		live = st->stack[i].spilled_ptr.live;
		/* liveness must not touch this stack slot anymore */
		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
		if (!(live & REG_LIVE_READ)) {
			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
			for (j = 0; j < BPF_REG_SIZE; j++)
				st->stack[i].slot_type[j] = STACK_INVALID;
		}
	}
}

static void clean_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	int i;

	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
		/* all regs in this state in all frames were already marked */
		return;

	for (i = 0; i <= st->curframe; i++)
		clean_func_state(env, st->frame[i]);
}

/* the parentage chains form a tree.
 * the verifier states are added to state lists at given insn and
 * pushed into state stack for future exploration.
 * when the verifier reaches bpf_exit insn some of the verifier states
 * stored in the state lists have their final liveness state already,
 * but a lot of states will get revised from liveness point of view when
 * the verifier explores other branches.
 * Example:
 * 1: r0 = 1
 * 2: if r1 == 100 goto pc+1
 * 3: r0 = 2
 * 4: exit
 * when the verifier reaches exit insn the register r0 in the state list of
 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
 * of insn 2 and goes exploring further. At the insn 4 it will walk the
 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
 *
 * Since the verifier pushes the branch states as it sees them while exploring
 * the program the condition of walking the branch instruction for the second
 * time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of state list in is_state_visited()
 * we can call this clean_live_states() function to mark all liveness states
 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
 * will not be used.
 * This function also clears the registers and stack for states that !READ
 * to simplify state merging.
 *
 * Important note here that walking the same branch instruction in the callee
 * doesn't mean that the states are DONE.
The verifier has to compare * the callsites */ static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) { struct bpf_verifier_state_list *sl; int i; sl = *explored_state(env, insn); while (sl) { if (sl->state.branches) goto next; if (sl->state.insn_idx != insn || sl->state.curframe != cur->curframe) goto next; for (i = 0; i <= cur->curframe; i++) if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) goto next; clean_verifier_state(env, &sl->state); next: sl = sl->next; } } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { bool equal; if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to * the same stack frame, since fp-8 in foo != fp-8 in bar */ return equal && rold->frameno == rcur->frameno; if (equal) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { if (!rold->precise && !rcur->precise) return true; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * 'id' is not compared, since it's only used for maps with * bpf_spin_lock inside map element and in such cases if * the rest of the prog is valid for one map element then * it's valid for all map elements regardless of the key * used in bpf_map_lookup() */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
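		 *
		 * (Hypothetical illustration: rold->off == 4 may have backed
		 * alignment proofs for accesses of the form pkt + 4 + k; an
		 * rcur with off == 2 gives no such guarantee, so it is
		 * rejected just below.)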
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	WARN_ON_ONCE(1);
	return false;
}

static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
			i += BPF_REG_SIZE - 1;
			/* explored state didn't use this */
			continue;
		}

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)
			return false;

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)
		return false;
	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 * explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	struct idpair *idmap;
	bool ret = false;
	int i;

	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
	/* If we failed to allocate the idmap, just say it's not safe */
	if (!idmap)
		return false;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
			goto out_free;
	}

	if (!stacksafe(old, cur, idmap))
		goto out_free;

	if (!refsafe(old, cur))
		goto out_free;
	ret = true;
out_free:
	kfree(idmap);
	return ret;
}

static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->active_spin_lock != cur->active_spin_lock)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}

/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;
	int err;

	/* By the time we get here, the read flags of PARENT_REG or REG could
	 * be any of REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is
	 * no need for propagation if PARENT_REG has the strongest
	 * REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    !flag ||
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)
		return 0;

	err = mark_reg_read(env, reg, parent_reg, flag);
	if (err)
		return err;

	return flag;
}

/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	struct bpf_reg_state *state_reg, *parent_reg;
	struct bpf_func_state *state, *parent;
	int i, frame, err = 0;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);
		return -EFAULT;
	}
	/* Propagate read liveness of registers...
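	 *
	 * (Sketch of the idea: if the matched equivalent state read r2, the
	 * pruned path must mark r2 as read in its own parent chain as well;
	 * otherwise a later pruning decision could wrongly treat r2 as dead.)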
	 */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	for (frame = 0; frame <= vstate->curframe; frame++) {
		parent = vparent->frame[frame];
		state = vstate->frame[frame];
		parent_reg = parent->regs;
		state_reg = state->regs;
		/* We don't need to worry about FP liveness, it's read-only */
		for (i = frame < vstate->curframe ?
		     BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
			err = propagate_liveness_reg(env, &state_reg[i],
						     &parent_reg[i]);
			if (err < 0)
				return err;
			if (err == REG_LIVE_READ64)
				mark_insn_zext(env, &parent_reg[i]);
		}

		/* Propagate stack slots. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			parent_reg = &parent->stack[i].spilled_ptr;
			state_reg = &state->stack[i].spilled_ptr;
			err = propagate_liveness_reg(env, state_reg,
						     parent_reg);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0;

	state = old->frame[old->curframe];
	state_reg = state->regs;
	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating r%d\n", i);
		err = mark_chain_precision(env, i);
		if (err < 0)
			return err;
	}

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		state_reg = &state->stack[i].spilled_ptr;
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating fp%d\n",
				(-i - 1) * BPF_REG_SIZE);
		err = mark_chain_precision_stack(env, i);
		if (err < 0)
			return err;
	}
	return 0;
}

static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr = cur->curframe;

	if (old->curframe != fr)
		return false;

	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (i = 0; i < MAX_BPF_REG; i++)
		if (memcmp(&fold->regs[i], &fcur->regs[i],
			   offsetof(struct bpf_reg_state, parent)))
			return false;
	return true;
}

static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl, **pprev;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	int i, j, err, states_cnt = 0;
	bool add_new_state = env->test_state_freq ? true : false;

	cur->last_insn_idx = env->prev_insn_idx;
	if (!env->insn_aux_data[insn_idx].prune_point)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	/* bpf progs typically have a pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristic helps decrease 'total_states' and 'peak_states' metric.
	 * In tests this amounts to up to a 50% reduction in total verifier
	 * memory consumption and a 20% verifier time speedup.
	 */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);
	sl = *pprev;

	clean_live_states(env, insn_idx, cur);

	while (sl) {
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			goto next;
		if (sl->state.branches) {
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur)) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				return -EINVAL;
			}
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 * r1 += 1
			 * if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
			goto miss;
		}
		if (states_equal(env, &sl->state, cur)) {
			sl->hit_cnt++;
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
			err = err ? : push_jmp_history(env, cur);
			err = err ? : propagate_precision(env, &sl->state);
			if (err)
				return err;
			return 1;
		}
miss:
		/* when new state is not going to be added do not increase miss count.
		 * Otherwise several loop iterations will remove the state
		 * recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 */
		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			*pprev = sl->next;
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
				u32 br = sl->state.branches;

				WARN_ONCE(br,
					  "BUG live_done but branches_to_explore %d\n",
					  br);
				free_verifier_state(&sl->state, false);
				kfree(sl);
				env->peak_states--;
			} else {
				/* cannot free this state, since parentage chain may
				 * walk it later. Add it to the free_list instead to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;
			}
			sl = *pprev;
			continue;
		}
next:
		pprev = &sl->next;
		sl = *pprev;
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return push_jmp_history(env, cur);

	if (!add_new_state)
		return push_jmp_history(env, cur);

	/* There were no equivalent states, remember the current one.
* Technically the current state is not proven to be safe yet, * but it will either reach outer most bpf_exit (which means it's safe) * or it will be rejected. When there are no loops the verifier won't be * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) * again on the way to bpf_exit. * When looping the sl->state.branches will be > 0 and this state * will not be considered for equivalence until branches == 0. */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; env->total_states++; env->peak_states++; env->prev_jmps_processed = env->jmps_processed; env->prev_insn_processed = env->insn_processed; /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); if (err) { free_verifier_state(new, false); kfree(new_sl); return err; } new->insn_idx = insn_idx; WARN_ONCE(new->branches != 1, "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); cur->parent = new; cur->first_insn_idx = insn_idx; clear_jmp_history(cur); new_sl->next = *explored_state(env, insn_idx); *explored_state(env, insn_idx) = new_sl; /* connect new state to parentage chain. Current frame needs all * registers connected. Only r6 - r9 of the callers are alive (pushed * to the stack implicitly by JITs) so in callers' frames connect just * r6 - r9 as an optimization. Callers will have r1 - r5 connected to * the state of the call instruction (with WRITTEN set), and r0 comes * from callee with its full parentage chain, anyway. */ /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (j = 0; j <= cur->curframe; j++) { for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; for (i = 0; i < BPF_REG_FP; i++) cur->frame[j]->regs[i].live = REG_LIVE_NONE; } /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; struct bpf_func_state *newframe = new->frame[j]; for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; frame->stack[i].spilled_ptr.parent = &newframe->stack[i].spilled_ptr; } } return 0; } /* Return true if it's OK to have the same insn return a different type. */ static bool reg_type_mismatch_ok(enum bpf_reg_type type) { switch (type) { case PTR_TO_CTX: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BTF_ID_OR_NULL: return false; default: return true; } } /* If an instruction was previously used with particular pointer types, then we * need to be careful to avoid cases such as the below, where it may be ok * for one branch accessing the pointer, but not ok for the other branch: * * R1 = sock_ptr * goto X; * ... * R1 = some_other_valid_ptr; * goto X; * ... 
 * R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}

static int do_check(struct bpf_verifier_env *env)
{
	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len;
	bool do_print_state = false;
	int prev_insn_idx = -1;

	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		env->prev_insn_idx = prev_insn_idx;
		if (env->insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				env->insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[env->insn_idx];
		class = BPF_CLASS(insn->code);

		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				env->insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, env->insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (env->log.level & BPF_LOG_LEVEL) {
				if (do_print_state)
					verbose(env, "\nfrom %d to %d%s: safe\n",
						env->prev_insn_idx, env->insn_idx,
						env->cur_state->speculative ?
						" (speculative execution)" : "");
				else
					verbose(env, "%d: safe\n", env->insn_idx);
			}
			goto process_bpf_exit;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level & BPF_LOG_LEVEL2 ||
		    (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "%d:", env->insn_idx);
			else
				verbose(env, "\nfrom %d to %d%s:",
					env->prev_insn_idx, env->insn_idx,
					env->cur_state->speculative ?
					" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;
		}

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_print	= verbose,
				.private_data	= env,
			};

			verbose_linfo(env, env->insn_idx, "; ");
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
		}

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false);
			if (err)
				return err;

			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
				/* An abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, env->insn_idx, insn); if (err) return err; env->insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, false); if (err) return err; prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, false); if (err) return err; } else if (class == BPF_JMP || class == BPF_JMP32) { u8 opcode = BPF_OP(insn->code); env->jmps_processed++; if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL) || insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } if (env->cur_state->active_spin_lock && (insn->src_reg == BPF_PSEUDO_CALL || insn->imm != BPF_FUNC_spin_unlock)) { verbose(env, "function calls are not allowed while holding a lock\n"); return -EINVAL; } if (insn->src_reg == BPF_PSEUDO_CALL) err = check_func_call(env, insn, &env->insn_idx); else err = check_helper_call(env, insn->imm, env->insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } env->insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } if (env->cur_state->active_spin_lock) { verbose(env, "bpf_spin_unlock is missing\n"); return -EINVAL; } if (state->curframe) { /* exit from nested function */ err = prepare_func_exit(env, &env->insn_idx); if (err) return err; do_print_state = true; continue; } err = check_reference_leak(env); if (err) return err; err = check_return_code(env); if (err) return err; process_bpf_exit: update_branch_counts(env, env->cur_state); err = pop_stack(env, &prev_insn_idx, &env->insn_idx, pop_log); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &env->insn_idx); if (err) 
return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; env->insn_idx++; env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } env->insn_idx++; } return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static bool is_tracing_prog_type(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_KPROBE: case BPF_PROG_TYPE_TRACEPOINT: case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_RAW_TRACEPOINT: return true; default: return false; } } static bool is_preallocated_map(struct bpf_map *map) { if (!check_map_prealloc(map)) return false; if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) return false; return true; } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* * Validate that trace type programs use preallocated hash maps. * * For programs attached to PERF events this is mandatory as the * perf NMI can hit any arbitrary code sequence. * * All other trace types using preallocated hash maps are unsafe as * well because tracepoint or kprobes can be inside locked regions * of the memory allocator or at a place where a recursion into the * memory allocator would see inconsistent state. * * On RT enabled kernels run-time allocation of all trace type * programs is strictly prohibited due to lock type constraints. On * !RT kernels it is allowed for backwards compatibility reasons for * now, but warnings are emitted so developers are made aware of * the unsafety and can fix their programs before this is enforced. */ if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) { if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (IS_ENABLED(CONFIG_PREEMPT_RT)) { verbose(env, "trace type programs can only use preallocated hash map\n"); return -EINVAL; } WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); verbose(env, "trace type programs with run-time allocated hash maps are unsafe. 
Switch to preallocated hash maps.\n"); } if ((is_tracing_prog_type(prog->type) || prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && map_value_has_spin_lock(map)) { verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); return -EINVAL; } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { verbose(env, "bpf_struct_ops map cannot be used in prog\n"); return -EINVAL; } return 0; } static bool bpf_map_is_cgroup_storage(struct bpf_map *map) { return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_insn_aux_data *aux; struct bpf_map *map; struct fd f; u64 addr; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn[0].src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; /* In final convert_pseudo_ld_imm64() step, this is * converted into regular 64-bit imm load insn. */ if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || (insn[0].src_reg == BPF_PSEUDO_MAP_FD && insn[1].imm != 0)) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn[0].imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn[0].imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } aux = &env->insn_aux_data[i]; if (insn->src_reg == BPF_PSEUDO_MAP_FD) { addr = (unsigned long)map; } else { u32 off = insn[1].imm; if (off >= BPF_MAX_VAR_OFF) { verbose(env, "direct value offset of %u is not allowed\n", off); fdput(f); return -EINVAL; } if (!map->ops->map_direct_value_addr) { verbose(env, "no direct value access support for this map type\n"); fdput(f); return -EINVAL; } err = map->ops->map_direct_value_addr(map, &addr, off); if (err) { verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", map->value_size, off); fdput(f); return err; } aux->map_off = off; addr += off; } insn[0].imm = (u32)addr; insn[1].imm = addr >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) { if (env->used_maps[j] == map) { aux->map_index = j; fdput(f); goto next_insn; } } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. 
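The refcount is bumped via bpf_map_inc() below so the map outlives the fd reference dropped by fdput().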
If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ bpf_map_inc(map); aux->map_index = env->used_map_cnt; env->used_maps[env->used_map_cnt++] = map; if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog->aux, map)) { verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; } fdput(f); next_insn: insn++; i++; continue; } /* Basic sanity check before we invest more work here. */ if (!bpf_opcode_in_insntable(insn->code)) { verbose(env, "unknown opcode %02x\n", insn->code); return -EINVAL; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { __bpf_free_used_maps(env->prog->aux, env->used_maps, env->used_map_cnt); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, struct bpf_prog *new_prog, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; struct bpf_insn *insn = new_prog->insnsi; u32 prog_len; int i; /* aux info at OFF always needs adjustment, no matter fast path * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the * original insn at old prog. */ old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); if (cnt == 1) return 0; prog_len = new_prog->len; new_data = vzalloc(array_size(prog_len, sizeof(struct bpf_insn_aux_data))); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) { new_data[i].seen = env->pass_cnt; new_data[i].zext_dst = insn_has_def32(env, insn + i); } env->insn_aux_data = new_data; vfree(old_data); return 0; } static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) { int i; if (len == 1) return; /* NOTE: fake 'exit' subprog should be updated as well. 
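That is why the loop below iterates with i <= env->subprog_cnt rather than i < env->subprog_cnt.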
*/ for (i = 0; i <= env->subprog_cnt; i++) { if (env->subprog_info[i].start <= off) continue; env->subprog_info[i].start += len - 1; } } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (IS_ERR(new_prog)) { if (PTR_ERR(new_prog) == -ERANGE) verbose(env, "insn %d cannot be patched due to 16-bit range\n", env->insn_aux_data[off].orig_idx); return NULL; } if (adjust_insn_aux_data(env, new_prog, off, len)) return NULL; adjust_subprog_starts(env, off, len); return new_prog; } static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, u32 off, u32 cnt) { int i, j; /* find first prog starting at or after off (first to remove) */ for (i = 0; i < env->subprog_cnt; i++) if (env->subprog_info[i].start >= off) break; /* find first prog starting at or after off + cnt (first to stay) */ for (j = i; j < env->subprog_cnt; j++) if (env->subprog_info[j].start >= off + cnt) break; /* if j doesn't start exactly at off + cnt, we are just removing * the front of previous prog */ if (env->subprog_info[j].start != off + cnt) j--; if (j > i) { struct bpf_prog_aux *aux = env->prog->aux; int move; /* move fake 'exit' subprog as well */ move = env->subprog_cnt + 1 - j; memmove(env->subprog_info + i, env->subprog_info + j, sizeof(*env->subprog_info) * move); env->subprog_cnt -= j - i; /* remove func_info */ if (aux->func_info) { move = aux->func_info_cnt - j; memmove(aux->func_info + i, aux->func_info + j, sizeof(*aux->func_info) * move); aux->func_info_cnt -= j - i; /* func_info->insn_off is set after all code rewrites, * in adjust_btf_func() - no need to adjust */ } } else { /* convert i from "first prog to remove" to "first to adjust" */ if (env->subprog_info[i].start == off) i++; } /* update fake 'exit' subprog as well */ for (; i <= env->subprog_cnt; i++) env->subprog_info[i].start -= cnt; return 0; } static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, u32 cnt) { struct bpf_prog *prog = env->prog; u32 i, l_off, l_cnt, nr_linfo; struct bpf_line_info *linfo; nr_linfo = prog->aux->nr_linfo; if (!nr_linfo) return 0; linfo = prog->aux->linfo; /* find first line info to remove, count lines to be removed */ for (i = 0; i < nr_linfo; i++) if (linfo[i].insn_off >= off) break; l_off = i; l_cnt = 0; for (; i < nr_linfo; i++) if (linfo[i].insn_off < off + cnt) l_cnt++; else break; /* First live insn doesn't match first live linfo, it needs to "inherit" * last removed linfo. prog is already modified, so prog->len == off * means no live instructions after (tail of the program was removed). */ if (prog->len != off && l_cnt && (i == nr_linfo || linfo[i].insn_off != off + cnt)) { l_cnt--; linfo[--i].insn_off = off + cnt; } /* remove the line info which refer to the removed instructions */ if (l_cnt) { memmove(linfo + l_off, linfo + i, sizeof(*linfo) * (nr_linfo - i)); prog->aux->nr_linfo -= l_cnt; nr_linfo = prog->aux->nr_linfo; } /* pull all linfo[i].insn_off >= off + cnt in by cnt */ for (i = l_off; i < nr_linfo; i++) linfo[i].insn_off -= cnt; /* fix up all subprogs (incl. 
'exit') which start >= off */ for (i = 0; i <= env->subprog_cnt; i++) if (env->subprog_info[i].linfo_idx > l_off) { /* program may have started in the removed region but * may not be fully removed */ if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) env->subprog_info[i].linfo_idx -= l_cnt; else env->subprog_info[i].linfo_idx = l_off; } return 0; } static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; unsigned int orig_prog_len = env->prog->len; int err; if (bpf_prog_is_dev_bound(env->prog->aux)) bpf_prog_offload_remove_insns(env, off, cnt); err = bpf_remove_insns(env->prog, off, cnt); if (err) return err; err = adjust_subprog_starts_after_remove(env, off, cnt); if (err) return err; err = bpf_adj_linfo_after_remove(env, off, cnt); if (err) return err; memmove(aux_data + off, aux_data + off + cnt, sizeof(*aux_data) * (orig_prog_len - off - cnt)); return 0; } /* The verifier does more data flow analysis than llvm and will not * explore branches that are dead at run time. Malicious programs can * have dead code too. Therefore replace all dead at-run-time code * with 'ja -1'. * * Just nops are not optimal, e.g. if they would sit at the end of the * program and through another bug we would manage to jump there, then * we'd execute beyond program memory otherwise. Returning exception * code also wouldn't work since we can have subprogs where the dead * code could be located. */ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } } static bool insn_is_cond_jump(u8 code) { u8 op; if (BPF_CLASS(code) == BPF_JMP32) return true; if (BPF_CLASS(code) != BPF_JMP) return false; op = BPF_OP(code); return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; } static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) { if (!insn_is_cond_jump(insn->code)) continue; if (!aux_data[i + 1].seen) ja.off = insn->off; else if (!aux_data[i + 1 + insn->off].seen) ja.off = 0; else continue; if (bpf_prog_is_dev_bound(env->prog->aux)) bpf_prog_offload_replace_insn(env, i, &ja); memcpy(insn, &ja, sizeof(ja)); } } static int opt_remove_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; int insn_cnt = env->prog->len; int i, err; for (i = 0; i < insn_cnt; i++) { int j; j = 0; while (i + j < insn_cnt && !aux_data[i + j].seen) j++; if (!j) continue; err = verifier_remove_insns(env, i, j); if (err) return err; insn_cnt = env->prog->len; } return 0; } static int opt_remove_nops(struct bpf_verifier_env *env) { const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, err; for (i = 0; i < insn_cnt; i++) { if (memcmp(&insn[i], &ja, sizeof(ja))) continue; err = verifier_remove_insns(env, i, 1); if (err) return err; insn_cnt--; i--; } return 0; } static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, const union bpf_attr *attr) { struct bpf_insn 
*patch, zext_patch[2], rnd_hi32_patch[4]; struct bpf_insn_aux_data *aux = env->insn_aux_data; int i, patch_len, delta = 0, len = env->prog->len; struct bpf_insn *insns = env->prog->insnsi; struct bpf_prog *new_prog; bool rnd_hi32; rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; zext_patch[1] = BPF_ZEXT_REG(0); rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); for (i = 0; i < len; i++) { int adj_idx = i + delta; struct bpf_insn insn; insn = insns[adj_idx]; if (!aux[adj_idx].zext_dst) { u8 code, class; u32 imm_rnd; if (!rnd_hi32) continue; code = insn.code; class = BPF_CLASS(code); if (insn_no_def(&insn)) continue; /* NOTE: arg "reg" (the fourth one) is only used for * BPF_STX which has been ruled out in above * check, it is safe to pass NULL here. */ if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { if (class == BPF_LD && BPF_MODE(code) == BPF_IMM) i++; continue; } /* ctx load could be transformed into wider load. */ if (class == BPF_LDX && aux[adj_idx].ptr_type == PTR_TO_CTX) continue; imm_rnd = get_random_int(); rnd_hi32_patch[0] = insn; rnd_hi32_patch[1].imm = imm_rnd; rnd_hi32_patch[3].dst_reg = insn.dst_reg; patch = rnd_hi32_patch; patch_len = 4; goto apply_patch_buffer; } if (!bpf_jit_needs_zext()) continue; zext_patch[0] = insn; zext_patch[1].dst_reg = insn.dst_reg; zext_patch[1].src_reg = insn.dst_reg; patch = zext_patch; patch_len = 2; apply_patch_buffer: new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); if (!new_prog) return -ENOMEM; env->prog = new_prog; insns = new_prog->insnsi; aux = env->insn_aux_data; delta += patch_len - 1; } return 0; } /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff * struct bpf_sock_ops -> struct sock */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; u32 target_size, size_default, off; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; if (ops->gen_prologue || env->seen_direct_write) { if (!ops->gen_prologue) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (type == BPF_WRITE && env->insn_aux_data[i + delta].sanitize_stack_off) { struct bpf_insn patch[] = { /* Sanitize suspicious stack slot with zero. 
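The patch below emits a BPF_ST of constant zero in front of the original store.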
* There are no memory dependencies for this store, * since it's only using frame pointer and immediate * constant of zero */ BPF_ST_MEM(BPF_DW, BPF_REG_FP, env->insn_aux_data[i + delta].sanitize_stack_off, 0), /* the original STX instruction will immediately * overwrite the same stack slot with appropriate value */ *insn, }; cnt = ARRAY_SIZE(patch); new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } switch (env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) continue; convert_ctx_access = ops->convert_ctx_access; break; case PTR_TO_SOCKET: case PTR_TO_SOCK_COMMON: convert_ctx_access = bpf_sock_convert_ctx_access; break; case PTR_TO_TCP_SOCK: convert_ctx_access = bpf_tcp_sock_convert_ctx_access; break; case PTR_TO_XDP_SOCK: convert_ctx_access = bpf_xdp_sock_convert_ctx_access; break; case PTR_TO_BTF_ID: if (type == BPF_READ) { insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code); env->prog->aux->num_exentries++; } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) { verbose(env, "Writes through BTF pointers are not allowed\n"); return -EINVAL; } continue; default: continue; } ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. */ is_narrower_load = size < ctx_field_size; size_default = bpf_ctx_off_adjust_machine(ctx_field_size); off = insn->off; if (is_narrower_load) { u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(size_default - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { u8 shift = bpf_ctx_narrow_access_offset( off, size, size_default) * 8; if (ctx_field_size <= 4) { if (shift) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } else { if (shift) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift); insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1ULL << size * 8) - 1); } } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; int err, num_exentries; if (env->subprog_cnt <= 1) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. 
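Callers treat -EFAULT as a hard reject; other errors from jit_subprogs() fall back to the interpreter only after out_undo_insn below has restored the call insns.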
*/ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1); return -EFAULT; } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ insn->off = subprog; /* remember original imm in case JIT fails and fallback * to interpreter will be needed */ env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; } err = bpf_prog_alloc_jited_linfo(prog); if (err) goto out_undo_insn; err = -ENOMEM; func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; subprog_end = env->subprog_info[i + 1].start; len = subprog_end - subprog_start; /* BPF_PROG_RUN doesn't call subprogs directly, * hence main prog stats include the runtime of subprogs. * subprogs don't have IDs and not reachable via prog_get_next_id * func[i]->aux->stats will never be accessed and stays NULL */ func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); if (!func[i]) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); func[i]->type = prog->type; func[i]->len = len; if (bpf_prog_calc_tag(func[i])) goto out_free; func[i]->is_func = 1; func[i]->aux->func_idx = i; /* the btf and func_info will be freed only at prog->aux */ func[i]->aux->btf = prog->aux->btf; func[i]->aux->func_info = prog->aux->func_info; /* Use bpf_prog_F_tag to indicate functions in stack traces. * Long term would need debug info to populate names */ func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i]->aux->linfo = prog->aux->linfo; func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; num_exentries = 0; insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && BPF_MODE(insn->code) == BPF_PROBE_MEM) num_exentries++; } func[i]->aux->num_exentries = num_exentries; func[i] = bpf_int_jit_compile(func[i]); if (!func[i]->jited) { err = -ENOTSUPP; goto out_free; } cond_resched(); } /* at this point all bpf functions were successfully JITed * now populate all bpf_calls with correct addresses and * run last pass of JIT */ for (i = 0; i < env->subprog_cnt; i++) { insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; subprog = insn->off; insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - __bpf_call_base; } /* we use the aux data to keep a list of the start addresses * of the JITed images for each function in the program * * for some architectures, such as powerpc64, the imm field * might not be large enough to hold the offset of the start * address of the callee's JITed image from __bpf_call_base * * in such cases, we can lookup the start address of a callee * by using its subprog id, available from the off field of * the call instruction, as an index for this list */ func[i]->aux->func = func; func[i]->aux->func_cnt = env->subprog_cnt; } for (i = 0; i < env->subprog_cnt; i++) { old_bpf_func = func[i]->bpf_func; tmp = bpf_int_jit_compile(func[i]); if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); err = -ENOTSUPP; goto 
out_free; } cond_resched(); } /* finally lock prog and jit images for all functions and * populate kallsyms */ for (i = 0; i < env->subprog_cnt; i++) { bpf_prog_lock_ro(func[i]); bpf_prog_kallsyms_add(func[i]); } /* Last step: make now unused interpreter insns from main * prog consistent for later dump requests, so they can * later look the same as if they were interpreted only. */ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); insn->imm = subprog; } prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; prog->aux->func_cnt = env->subprog_cnt; bpf_prog_free_unused_jited_linfo(prog); return 0; out_free: for (i = 0; i < env->subprog_cnt; i++) if (func[i]) bpf_jit_free(func[i]); kfree(func); out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; } bpf_prog_free_jited_linfo(prog); return err; } static int fixup_call_args(struct bpf_verifier_env *env) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; int i, depth; #endif int err = 0; if (env->prog->jit_requested && !bpf_prog_is_dev_bound(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; if (err == -EFAULT) return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) return depth; bpf_patch_call_args(insn, depth); } err = 0; #endif return err; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; bool expect_blinding = bpf_jit_blinding_enabled(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, ret, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; struct bpf_insn mask_and_div[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx div 0 -> 0 */ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; struct bpf_insn mask_and_mod[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx mod 0 -> Rx */ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), *insn, }; struct bpf_insn *patchlet; if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { patchlet = mask_and_div + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); } else { patchlet = mask_and_mod + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
1 : 0); } new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) { cnt = env->ops->gen_ld_abs(insn, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; struct bpf_insn insn_buf[16]; struct bpf_insn *patch = &insn_buf[0]; bool issrc, isneg; u32 off_reg; aux = &env->insn_aux_data[i + delta]; if (!aux->alu_state || aux->alu_state == BPF_ALU_NON_POINTER) continue; isneg = aux->alu_state & BPF_ALU_NEG_VALUE; issrc = (aux->alu_state & BPF_ALU_SANITIZE) == BPF_ALU_SANITIZE_SRC; off_reg = issrc ? insn->src_reg : insn->dst_reg; if (isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); if (issrc) { *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); insn->src_reg = BPF_REG_AX; } else { *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, BPF_REG_AX); } if (isneg) insn->code = insn->code == code_add ? code_sub : code_add; *patch++ = *insn; if (issrc && isneg) *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); cnt = patch - insn_buf; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->src_reg == BPF_PSEUDO_CALL) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_override_return) prog->kprobe_override = 1; if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. 
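Hence the conservative settings just below: worst-case stack depth (MAX_BPF_STACK) and packet offset (MAX_PACKET_OFF) are assumed for the whole program.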
*/ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; if (env->bpf_capable && !expect_blinding && prog->jit_requested && !bpf_map_key_poisoned(aux) && !bpf_map_ptr_poisoned(aux) && !bpf_map_ptr_unpriv(aux)) { struct bpf_jit_poke_descriptor desc = { .reason = BPF_POKE_REASON_TAIL_CALL, .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), .tail_call.key = bpf_map_key_immediate(aux), }; ret = bpf_jit_add_poke_descriptor(prog, &desc); if (ret < 0) { verbose(env, "adding tail call poke descriptor failed\n"); return ret; } insn->imm = ret + 1; continue; } if (!bpf_map_ptr_unpriv(aux)) continue; /* instead of changing every JIT dealing with tail_call * emit two extra insns: * if (index >= max_entries) goto out; * index &= array->index_mask; * to avoid out-of-bounds cpu speculation */ if (bpf_map_ptr_poisoned(aux)) { verbose(env, "tail_call abusing map_ptr\n"); return -EINVAL; } map_ptr = BPF_MAP_PTR(aux->map_ptr_state); insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, container_of(map_ptr, struct bpf_array, map)->index_mask); insn_buf[2] = *insn; cnt = 3; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. 
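The BITS_PER_LONG == 64 check below is what enforces that restriction.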
*/ if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || insn->imm == BPF_FUNC_map_delete_elem || insn->imm == BPF_FUNC_map_push_elem || insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; map_ptr = BPF_MAP_PTR(aux->map_ptr_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_push_elem, (int (*)(struct bpf_map *map, void *value, u64 flags))NULL)); BUILD_BUG_ON(!__same_type(ops->map_pop_elem, (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_peek_elem, (int (*)(struct bpf_map *map, void *value))NULL)); switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base; continue; case BPF_FUNC_map_update_elem: insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base; continue; case BPF_FUNC_map_delete_elem: insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; case BPF_FUNC_map_push_elem: insn->imm = BPF_CAST_CALL(ops->map_push_elem) - __bpf_call_base; continue; case BPF_FUNC_map_pop_elem: insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - __bpf_call_base; continue; case BPF_FUNC_map_peek_elem: insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - __bpf_call_base; continue; } goto patch_call_imm; } if (prog->jit_requested && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_jiffies64) { struct bpf_insn ld_jiffies_addr[2] = { BPF_LD_IMM64(BPF_REG_0, (unsigned long)&jiffies), }; insn_buf[0] = ld_jiffies_addr[0]; insn_buf[1] = ld_jiffies_addr[1]; insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0); cnt = 3; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } /* Since poke tab is now finalized, publish aux to tracker. 
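Each map in the poke table must implement the map_poke_track/untrack/run callbacks, otherwise the verifier reports itself as misconfigured.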
*/ for (i = 0; i < prog->aux->size_poke_tab; i++) { map_ptr = prog->aux->poke_tab[i].tail_call.map; if (!map_ptr->ops->map_poke_track || !map_ptr->ops->map_poke_untrack || !map_ptr->ops->map_poke_run) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); if (ret < 0) { verbose(env, "tracking tail call prog failed\n"); return ret; } } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; sl = env->free_list; while (sl) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } env->free_list = NULL; if (!env->explored_states) return; for (i = 0; i < state_htab_size(env); i++) { sl = env->explored_states[i]; while (sl) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } env->explored_states[i] = NULL; } } /* The verifier is using insn_aux_data[] to store temporary data during * verification and to store information for passes that run after the * verification like dead code sanitization. do_check_common() for subprogram N * may analyze many other subprograms. sanitize_insn_aux_data() clears all * temporary data after do_check_common() finds that subprogram N cannot be * verified independently. pass_cnt counts the number of times * do_check_common() was run and insn->aux->seen tells the pass number * insn_aux_data was touched. These variables are compared to clear temporary * data from failed pass. For testing and experiments do_check_common() can be * run multiple times even when prior attempt to verify is unsuccessful. */ static void sanitize_insn_aux_data(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; struct bpf_insn_aux_data *aux; int i, class; for (i = 0; i < env->prog->len; i++) { class = BPF_CLASS(insn[i].code); if (class != BPF_LDX && class != BPF_STX) continue; aux = &env->insn_aux_data[i]; if (aux->seen != env->pass_cnt) continue; memset(aux, 0, offsetof(typeof(*aux), orig_idx)); } } static int do_check_common(struct bpf_verifier_env *env, int subprog) { bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); struct bpf_verifier_state *state; struct bpf_reg_state *regs; int ret, i; env->prev_linfo = NULL; env->pass_cnt++; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; state->curframe = 0; state->speculative = false; state->branches = 1; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); return -ENOMEM; } env->cur_state = state; init_func_state(env, state->frame[0], BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, subprog); regs = state->frame[state->curframe]->regs; if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { ret = btf_prepare_func_args(env, subprog, regs); if (ret) goto out; for (i = BPF_REG_1; i <= BPF_REG_5; i++) { if (regs[i].type == PTR_TO_CTX) mark_reg_known_zero(env, regs, i); else if (regs[i].type == SCALAR_VALUE) mark_reg_unknown(env, regs, i); } } else { /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); ret = btf_check_func_arg_match(env, subprog, regs); if (ret == -EFAULT) /* unlikely verifier bug. abort. * ret == 0 and ret < 0 are sadly acceptable for * main() function due to backward compatibility. * Like socket filter program may be written as: * int bpf_prog(struct pt_regs *ctx) * and never dereference that ctx in the program. 
* 'struct pt_regs' is a type mismatch for socket * filter that should be using 'struct __sk_buff'. */ goto out; } ret = do_check(env); out: /* check for NULL is necessary, since cur_state can be freed inside * do_check() under memory pressure. */ if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } while (!pop_stack(env, NULL, NULL, false)); if (!ret && pop_log) bpf_vlog_reset(&env->log, 0); free_states(env); if (ret) /* clean aux data in case subprog was rejected */ sanitize_insn_aux_data(env); return ret; } /* Verify all global functions in a BPF program one by one based on their BTF. * All global functions must pass verification. Otherwise the whole program is rejected. * Consider: * int bar(int); * int foo(int f) * { * return bar(f); * } * int bar(int b) * { * ... * } * foo() will be verified first for R1=any_scalar_value. During verification it * will be assumed that bar() already verified successfully and call to bar() * from foo() will be checked for type match only. Later bar() will be verified * independently to check that it's safe for R1=any_scalar_value. */ static int do_check_subprogs(struct bpf_verifier_env *env) { struct bpf_prog_aux *aux = env->prog->aux; int i, ret; if (!aux->func_info) return 0; for (i = 1; i < env->subprog_cnt; i++) { if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) continue; env->insn_idx = env->subprog_info[i].start; WARN_ON_ONCE(env->insn_idx == 0); ret = do_check_common(env, i); if (ret) { return ret; } else if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "Func#%d is safe for any args that match its prototype\n", i); } } return 0; } static int do_check_main(struct bpf_verifier_env *env) { int ret; env->insn_idx = 0; ret = do_check_common(env, 0); if (!ret) env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; return ret; } static void print_verification_stats(struct bpf_verifier_env *env) { int i; if (env->log.level & BPF_LOG_STATS) { verbose(env, "verification time %lld usec\n", div_u64(env->verification_time, 1000)); verbose(env, "stack depth "); for (i = 0; i < env->subprog_cnt; i++) { u32 depth = env->subprog_info[i].stack_depth; verbose(env, "%d", depth); if (i + 1 < env->subprog_cnt) verbose(env, "+"); } verbose(env, "\n"); } verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " "total_states %d peak_states %d mark_read %d\n", env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, env->max_states_per_insn, env->total_states, env->peak_states, env->longest_mark_read_walk); } static int check_struct_ops_btf_id(struct bpf_verifier_env *env) { const struct btf_type *t, *func_proto; const struct bpf_struct_ops *st_ops; const struct btf_member *member; struct bpf_prog *prog = env->prog; u32 btf_id, member_idx; const char *mname; btf_id = prog->aux->attach_btf_id; st_ops = bpf_struct_ops_find(btf_id); if (!st_ops) { verbose(env, "attach_btf_id %u is not a supported struct\n", btf_id); return -ENOTSUPP; } t = st_ops->type; member_idx = prog->expected_attach_type; if (member_idx >= btf_type_vlen(t)) { verbose(env, "attach to invalid member idx %u of struct %s\n", member_idx, st_ops->name); return -EINVAL; } member = &btf_type_member(t)[member_idx]; mname = btf_name_by_offset(btf_vmlinux, member->name_off); func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL); if (!func_proto) { verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", mname, member_idx, st_ops->name); return -EINVAL; } if (st_ops->check_member) { int err = st_ops->check_member(t, member); if 
(err) { verbose(env, "attach to unsupported member %s of struct %s\n", mname, st_ops->name); return err; } } prog->aux->attach_func_proto = func_proto; prog->aux->attach_func_name = mname; env->ops = st_ops->verifier_ops; return 0; } #define SECURITY_PREFIX "security_" static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr) { if (within_error_injection_list(addr) || !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, sizeof(SECURITY_PREFIX) - 1)) return 0; return -EINVAL; } static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; struct bpf_prog *tgt_prog = prog->aux->linked_prog; u32 btf_id = prog->aux->attach_btf_id; const char prefix[] = "btf_trace_"; struct btf_func_model fmodel; int ret = 0, subprog = -1, i; struct bpf_trampoline *tr; const struct btf_type *t; bool conservative = true; const char *tname; struct btf *btf; long addr; u64 key; if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) return check_struct_ops_btf_id(env); if (prog->type != BPF_PROG_TYPE_TRACING && prog->type != BPF_PROG_TYPE_LSM && !prog_extension) return 0; if (!btf_id) { verbose(env, "Tracing programs must provide btf_id\n"); return -EINVAL; } btf = bpf_prog_get_target_btf(prog); if (!btf) { verbose(env, "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); return -EINVAL; } t = btf_type_by_id(btf, btf_id); if (!t) { verbose(env, "attach_btf_id %u is invalid\n", btf_id); return -EINVAL; } tname = btf_name_by_offset(btf, t->name_off); if (!tname) { verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); return -EINVAL; } if (tgt_prog) { struct bpf_prog_aux *aux = tgt_prog->aux; for (i = 0; i < aux->func_info_cnt; i++) if (aux->func_info[i].type_id == btf_id) { subprog = i; break; } if (subprog == -1) { verbose(env, "Subprog %s doesn't exist\n", tname); return -EINVAL; } conservative = aux->func_info_aux[subprog].unreliable; if (prog_extension) { if (conservative) { verbose(env, "Cannot replace static functions\n"); return -EINVAL; } if (!prog->jit_requested) { verbose(env, "Extension programs should be JITed\n"); return -EINVAL; } env->ops = bpf_verifier_ops[tgt_prog->type]; prog->expected_attach_type = tgt_prog->expected_attach_type; } if (!tgt_prog->jited) { verbose(env, "Can attach to only JITed progs\n"); return -EINVAL; } if (tgt_prog->type == prog->type) { /* Cannot fentry/fexit another fentry/fexit program. * Cannot attach program extension to another extension. * It's ok to attach fentry/fexit to extension program. */ verbose(env, "Cannot recursively attach\n"); return -EINVAL; } if (tgt_prog->type == BPF_PROG_TYPE_TRACING && prog_extension && (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { /* Program extensions can extend all program types * except fentry/fexit. The reason is the following. * The fentry/fexit programs are used for performance * analysis, stats and can be attached to any program * type except themselves. When extension program is * replacing XDP function it is necessary to allow * performance analysis of all functions. Both original * XDP program and its program extension. Hence * attaching fentry/fexit to BPF_PROG_TYPE_EXT is * allowed. If extending of fentry/fexit was allowed it * would be possible to create long call chain * fentry->extension->fentry->extension beyond * reasonable stack size. Hence extending fentry is not * allowed. 
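Extension-of-extension is already rejected above by the 'Cannot recursively attach' check.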
*/ verbose(env, "Cannot extend fentry/fexit\n"); return -EINVAL; } key = ((u64)aux->id) << 32 | btf_id; } else { if (prog_extension) { verbose(env, "Cannot replace kernel functions\n"); return -EINVAL; } key = btf_id; } switch (prog->expected_attach_type) { case BPF_TRACE_RAW_TP: if (tgt_prog) { verbose(env, "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); return -EINVAL; } if (!btf_type_is_typedef(t)) { verbose(env, "attach_btf_id %u is not a typedef\n", btf_id); return -EINVAL; } if (strncmp(prefix, tname, sizeof(prefix) - 1)) { verbose(env, "attach_btf_id %u points to wrong type name %s\n", btf_id, tname); return -EINVAL; } tname += sizeof(prefix) - 1; t = btf_type_by_id(btf, t->type); if (!btf_type_is_ptr(t)) /* should never happen in valid vmlinux build */ return -EINVAL; t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) /* should never happen in valid vmlinux build */ return -EINVAL; /* remember two read only pointers that are valid for * the life time of the kernel */ prog->aux->attach_func_name = tname; prog->aux->attach_func_proto = t; prog->aux->attach_btf_trace = true; return 0; case BPF_TRACE_ITER: if (!btf_type_is_func(t)) { verbose(env, "attach_btf_id %u is not a function\n", btf_id); return -EINVAL; } t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) return -EINVAL; prog->aux->attach_func_name = tname; prog->aux->attach_func_proto = t; if (!bpf_iter_prog_supported(prog)) return -EINVAL; ret = btf_distill_func_proto(&env->log, btf, t, tname, &fmodel); return ret; default: if (!prog_extension) return -EINVAL; fallthrough; case BPF_MODIFY_RETURN: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: prog->aux->attach_func_name = tname; if (prog->type == BPF_PROG_TYPE_LSM) { ret = bpf_lsm_verify_prog(&env->log, prog); if (ret < 0) return ret; } if (!btf_type_is_func(t)) { verbose(env, "attach_btf_id %u is not a function\n", btf_id); return -EINVAL; } if (prog_extension && btf_check_type_match(env, prog, btf, t)) return -EINVAL; t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) return -EINVAL; tr = bpf_trampoline_lookup(key); if (!tr) return -ENOMEM; /* t is either vmlinux type or another program's type */ prog->aux->attach_func_proto = t; mutex_lock(&tr->mutex); if (tr->func.addr) { prog->aux->trampoline = tr; goto out; } if (tgt_prog && conservative) { prog->aux->attach_func_proto = NULL; t = NULL; } ret = btf_distill_func_proto(&env->log, btf, t, tname, &tr->func.model); if (ret < 0) goto out; if (tgt_prog) { if (subprog == 0) addr = (long) tgt_prog->bpf_func; else addr = (long) tgt_prog->aux->func[subprog]->bpf_func; } else { addr = kallsyms_lookup_name(tname); if (!addr) { verbose(env, "The address of function %s cannot be found\n", tname); ret = -ENOENT; goto out; } } if (prog->expected_attach_type == BPF_MODIFY_RETURN) { ret = check_attach_modify_return(prog, addr); if (ret) verbose(env, "%s() is not modifiable\n", prog->aux->attach_func_name); } if (ret) goto out; tr->func.addr = (void *)addr; prog->aux->trampoline = tr; out: mutex_unlock(&tr->mutex); if (ret) bpf_trampoline_put(tr); return ret; } } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, union bpf_attr __user *uattr) { u64 start_time = ktime_get_ns(); struct bpf_verifier_env *env; struct bpf_verifier_log *log; int i, len, ret = -EINVAL; bool is_priv; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time 
bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; len = (*prog)->len; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; for (i = 0; i < len; i++) env->insn_aux_data[i].orig_idx = i; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; is_priv = bpf_capable(); if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { mutex_lock(&bpf_verifier_lock); if (!btf_vmlinux) btf_vmlinux = btf_parse_vmlinux(); mutex_unlock(&bpf_verifier_lock); } /* grab the mutex to protect few globals used by verifier */ if (!is_priv) mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) goto err_unlock; } if (IS_ERR(btf_vmlinux)) { /* Either gcc or pahole or kernel are broken. */ verbose(env, "in-kernel BTF is malformed\n"); ret = PTR_ERR(btf_vmlinux); goto skip_full_check; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) env->strict_alignment = false; env->allow_ptr_leaks = bpf_allow_ptr_leaks(); env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); env->bypass_spec_v1 = bpf_bypass_spec_v1(); env->bypass_spec_v4 = bpf_bypass_spec_v4(); env->bpf_capable = bpf_capable(); if (is_priv) env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env->prog); if (ret) goto skip_full_check; } env->explored_states = kvcalloc(state_htab_size(env), sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_subprogs(env); if (ret < 0) goto skip_full_check; ret = check_btf_info(env, attr, uattr); if (ret < 0) goto skip_full_check; ret = check_attach_btf_id(env); if (ret) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; ret = do_check_subprogs(env); ret = ret ?: do_check_main(env); if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) ret = bpf_prog_offload_finalize(env); skip_full_check: kvfree(env->explored_states); if (ret == 0) ret = check_max_stack_depth(env); /* instruction rewrites happen after this point */ if (is_priv) { if (ret == 0) opt_hard_wire_dead_code_branches(env); if (ret == 0) ret = opt_remove_dead_code(env); if (ret == 0) ret = opt_remove_nops(env); } else { if (ret == 0) sanitize_dead_code(env); } if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); /* do 32-bit optimization after insn patching has done so those patched * insns could be handled correctly. */ if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? 
!ret : false; } if (ret == 0) ret = fixup_call_args(env); env->verification_time = ktime_get_ns() - start_time; print_verification_stats(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } if (ret == 0) adjust_btf_func(env); err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_used_maps() will release them. */ release_maps(env); /* extension progs temporarily inherit the attach_type of their targets for verification purposes, so set it back to zero before returning */ if (env->prog->type == BPF_PROG_TYPE_EXT) env->prog->expected_attach_type = 0; *prog = env->prog; err_unlock: if (!is_priv) mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
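/*
 * For orientation, the pass order driven by bpf_check() above, as implemented
 * in this file:
 *   replace_map_fd_with_map_ptr() -> check_subprogs() -> check_btf_info() ->
 *   check_attach_btf_id() -> check_cfg() -> do_check_subprogs() ->
 *   do_check_main() -> dead code elimination (the opt_* passes for privileged
 *   loaders, sanitize_dead_code() otherwise) -> convert_ctx_accesses() ->
 *   fixup_bpf_calls() -> opt_subreg_zext_lo32_rnd_hi32() -> fixup_call_args().
 */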
/********************************************************************** * $Id$ * * Project: MapServer * Purpose: OGC Filter Encoding implementation * Author: Y. Assefa, DM Solutions Group (assefa@dmsolutions.ca) * ********************************************************************** * Copyright (c) 2003, Y. Assefa, DM Solutions Group Inc * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies of this Software or works derived from this Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ****************************************************************************/ #ifdef USE_OGR #include "cpl_minixml.h" #endif #include "mapogcfilter.h" #include "mapserver.h" #include "mapows.h" #include "mapowscommon.h" #ifdef USE_OGR char *FLTGetIsLikeComparisonCommonExpression(FilterEncodingNode *psFilterNode) { const size_t bufferSize = 1024; char szBuffer[1024]; char szTmp[256]; char *pszValue = NULL; const char *pszWild = NULL; const char *pszSingle = NULL; const char *pszEscape = NULL; int bCaseInsensitive = 0; FEPropertyIsLike* propIsLike; int nLength=0, i=0, iTmp=0; if (!psFilterNode || !psFilterNode->pOther || !psFilterNode->psLeftNode || !psFilterNode->psRightNode || !psFilterNode->psRightNode->pszValue) return NULL; propIsLike = (FEPropertyIsLike *)psFilterNode->pOther; pszWild = propIsLike->pszWildCard; pszSingle = propIsLike->pszSingleChar; pszEscape = propIsLike->pszEscapeChar; bCaseInsensitive = propIsLike->bCaseInsensitive; if (!pszWild || strlen(pszWild) == 0 || !pszSingle || strlen(pszSingle) == 0 || !pszEscape || strlen(pszEscape) == 0) return NULL; /* -------------------------------------------------------------------- */ /* Use operand with regular expressions. 
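The loop below maps the filter's wildcard, single-char and escape tokens to '.*', '.' and a backslash respectively, and anchors the pattern with '^' when it starts with a literal character.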
*/ /* -------------------------------------------------------------------- */ szBuffer[0] = '\0'; sprintf(szTmp, "%s", "(\"["); szTmp[4] = '\0'; strlcat(szBuffer, szTmp, bufferSize); /* attribute */ strlcat(szBuffer, psFilterNode->psLeftNode->pszValue, bufferSize); szBuffer[strlen(szBuffer)] = '\0'; /* #3521 */ if (bCaseInsensitive == 1) sprintf(szTmp, "%s", "]\" ~* \""); else sprintf(szTmp, "%s", "]\" ~ \""); szTmp[7] = '\0'; strlcat(szBuffer, szTmp, bufferSize); szBuffer[strlen(szBuffer)] = '\0'; pszValue = psFilterNode->psRightNode->pszValue; nLength = strlen(pszValue); if( 1 + 2 * nLength + 1 + 1 >= sizeof(szTmp) ) return NULL; iTmp =0; if (nLength > 0 && pszValue[0] != pszWild[0] && pszValue[0] != pszSingle[0] && pszValue[0] != pszEscape[0]) { szTmp[iTmp]= '^'; iTmp++; } for (i=0; i<nLength; i++) { if (pszValue[i] != pszWild[0] && pszValue[i] != pszSingle[0] && pszValue[i] != pszEscape[0]) { szTmp[iTmp] = pszValue[i]; iTmp++; szTmp[iTmp] = '\0'; } else if (pszValue[i] == pszSingle[0]) { szTmp[iTmp] = '.'; iTmp++; szTmp[iTmp] = '\0'; } else if (pszValue[i] == pszEscape[0]) { szTmp[iTmp] = '\\'; iTmp++; szTmp[iTmp] = '\0'; } else if (pszValue[i] == pszWild[0]) { szTmp[iTmp++] = '.'; szTmp[iTmp++] = '*'; szTmp[iTmp] = '\0'; } } szTmp[iTmp] = '"'; szTmp[++iTmp] = '\0'; strlcat(szBuffer, szTmp, bufferSize); strlcat(szBuffer, ")", bufferSize); return msStrdup(szBuffer); } char *FLTGetIsBetweenComparisonCommonExpresssion(FilterEncodingNode *psFilterNode, layerObj *lp) { const size_t bufferSize = 1024; char szBuffer[1024]; char **aszBounds = NULL; int nBounds = 0; int bString=0; int bDateTime = 0; char *pszExpression=NULL, *pszTmpEscaped; if (!psFilterNode || !(strcasecmp(psFilterNode->pszValue, "PropertyIsBetween") == 0)) return NULL; if (psFilterNode->psLeftNode == NULL || psFilterNode->psRightNode == NULL ) return NULL; /* -------------------------------------------------------------------- */ /* Get the bounds value which are stored like boundmin;boundmax */ /* -------------------------------------------------------------------- */ aszBounds = msStringSplit(psFilterNode->psRightNode->pszValue, ';', &nBounds); if (nBounds != 2) { msFreeCharArray(aszBounds, nBounds); return NULL; } /* -------------------------------------------------------------------- */ /* check if the value is a numeric value or alphanumeric. If it */ /* is alphanumeric, add quotes around attribute and values. */ /* -------------------------------------------------------------------- */ bString = 0; if (aszBounds[0]) { const char* pszType; snprintf(szBuffer, bufferSize, "%s_type", psFilterNode->psLeftNode->pszValue); pszType = msOWSLookupMetadata(&(lp->metadata), "OFG", szBuffer); if (pszType != NULL && (strcasecmp(pszType, "Character") == 0)) bString = 1; else if (pszType != NULL && (strcasecmp(pszType, "Date") == 0)) bDateTime = 1; else if (FLTIsNumeric(aszBounds[0]) == MS_FALSE) bString = 1; } if (!bString && !bDateTime) { if (aszBounds[1]) { if (FLTIsNumeric(aszBounds[1]) == MS_FALSE) bString = 1; } } /* -------------------------------------------------------------------- */ /* build expresssion. 
*/ /* -------------------------------------------------------------------- */ /* attribute */ if (bString) sprintf(szBuffer, "%s", "(\"["); else sprintf(szBuffer, "%s", "(["); pszExpression = msStringConcatenate(pszExpression, szBuffer); pszExpression = msStringConcatenate(pszExpression, psFilterNode->psLeftNode->pszValue); if (bString) sprintf(szBuffer, "%s", "]\" "); else sprintf(szBuffer, "%s", "] "); pszExpression = msStringConcatenate(pszExpression, szBuffer); sprintf(szBuffer, "%s", " >= "); pszExpression = msStringConcatenate(pszExpression, szBuffer); if (bString) { pszExpression = msStringConcatenate(pszExpression, "\""); } else if (bDateTime) { pszExpression = msStringConcatenate(pszExpression, "`"); } pszTmpEscaped = msStringEscape(aszBounds[0]); snprintf(szBuffer, bufferSize, "%s", pszTmpEscaped); if(pszTmpEscaped != aszBounds[0] ) msFree(pszTmpEscaped); pszExpression = msStringConcatenate(pszExpression, szBuffer); if (bString) { pszExpression = msStringConcatenate(pszExpression, "\""); } else if (bDateTime) { pszExpression = msStringConcatenate(pszExpression, "`"); } sprintf(szBuffer, "%s", " AND "); pszExpression = msStringConcatenate(pszExpression, szBuffer); if (bString) sprintf(szBuffer, "%s", " \"["); else sprintf(szBuffer, "%s", " ["); pszExpression = msStringConcatenate(pszExpression, szBuffer); /* attribute */ pszExpression = msStringConcatenate(pszExpression, psFilterNode->psLeftNode->pszValue); if (bString) sprintf(szBuffer, "%s", "]\" "); else sprintf(szBuffer, "%s", "] "); pszExpression = msStringConcatenate(pszExpression, szBuffer); sprintf(szBuffer, "%s", " <= "); pszExpression = msStringConcatenate(pszExpression, szBuffer); if (bString) { pszExpression = msStringConcatenate(pszExpression, "\""); } else if (bDateTime) { pszExpression = msStringConcatenate(pszExpression, "`"); } pszTmpEscaped = msStringEscape(aszBounds[1]); snprintf(szBuffer, bufferSize, "%s", pszTmpEscaped); if (pszTmpEscaped != aszBounds[1]) msFree(pszTmpEscaped); pszExpression = msStringConcatenate(pszExpression, szBuffer); if (bString) { pszExpression = msStringConcatenate(pszExpression, "\""); } else if (bDateTime) { pszExpression = msStringConcatenate(pszExpression, "`"); } sprintf(szBuffer, "%s", ")"); pszExpression = msStringConcatenate(pszExpression, szBuffer); msFreeCharArray(aszBounds, nBounds); return pszExpression; } char *FLTGetBinaryComparisonCommonExpression(FilterEncodingNode *psFilterNode, layerObj *lp) { char szTmp[1024]; char *pszExpression = NULL, *pszTmpEscaped; int bString; int bDateTime; if (psFilterNode == NULL) return NULL; /* -------------------------------------------------------------------- */ /* check if the value is a numeric value or alphanumeric. If it */ /* is alphanumeric, add quotes around attribute and values. */ /* -------------------------------------------------------------------- */ bString = 0; bDateTime = 0; if (psFilterNode->psRightNode->pszValue) { const char* pszType; snprintf(szTmp, sizeof(szTmp), "%s_type", psFilterNode->psLeftNode->pszValue); pszType = msOWSLookupMetadata(&(lp->metadata), "OFG", szTmp); if (pszType != NULL && (strcasecmp(pszType, "Character") == 0)) bString = 1; else if (pszType != NULL && (strcasecmp(pszType, "Date") == 0)) bDateTime = 1; else if (FLTIsNumeric(psFilterNode->psRightNode->pszValue) == MS_FALSE) bString = 1; } /* special case to be able to have empty strings in the expression.
*/ /* propertyislike is always treated as string */ if (psFilterNode->psRightNode->pszValue == NULL || strcasecmp(psFilterNode->pszValue, "PropertyIsLike") == 0) bString = 1; /* attribute */ if (bString) sprintf(szTmp, "%s", "(\"["); else sprintf(szTmp, "%s","(["); pszExpression = msStringConcatenate(pszExpression, szTmp); pszExpression = msStringConcatenate(pszExpression, psFilterNode->psLeftNode->pszValue); if (bString) sprintf(szTmp, "%s","]\" "); else sprintf(szTmp, "%s", "] "); pszExpression = msStringConcatenate(pszExpression, szTmp); if (strcasecmp(psFilterNode->pszValue, "PropertyIsEqualTo") == 0) { /* case insensitive set ? */ if (psFilterNode->psRightNode->pOther && (*(int *)psFilterNode->psRightNode->pOther) == 1) sprintf(szTmp, "%s", "=*"); else sprintf(szTmp, "%s", "="); } else if (strcasecmp(psFilterNode->pszValue, "PropertyIsNotEqualTo") == 0) sprintf(szTmp, "%s", "!="); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsLessThan") == 0) sprintf(szTmp, "%s", "<"); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsGreaterThan") == 0) sprintf(szTmp, "%s", ">"); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsLessThanOrEqualTo") == 0) sprintf(szTmp, "%s", "<="); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsGreaterThanOrEqualTo") == 0) sprintf(szTmp, "%s", ">="); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsLike") == 0) sprintf(szTmp, "%s", "~"); pszExpression = msStringConcatenate(pszExpression, szTmp); pszExpression = msStringConcatenate(pszExpression, " "); /* value */ if (bString) { sprintf(szTmp, "%s", "\""); pszExpression = msStringConcatenate(pszExpression, szTmp); } else if (bDateTime) { sprintf(szTmp, "%s", "`"); pszExpression = msStringConcatenate(pszExpression, szTmp); } if (psFilterNode->psRightNode->pszValue) { pszTmpEscaped = msStringEscape(psFilterNode->psRightNode->pszValue); pszExpression = msStringConcatenate(pszExpression, pszTmpEscaped); if(pszTmpEscaped != psFilterNode->psRightNode->pszValue ) msFree(pszTmpEscaped); } if (bString) { sprintf(szTmp, "%s", "\""); pszExpression = msStringConcatenate(pszExpression, szTmp); } else if (bDateTime) { sprintf(szTmp, "%s", "`"); pszExpression = msStringConcatenate(pszExpression, szTmp); } sprintf(szTmp, "%s", ")"); pszExpression = msStringConcatenate(pszExpression, szTmp); return pszExpression; } char *FLTGetLogicalComparisonCommonExpression(FilterEncodingNode *psFilterNode, layerObj *lp) { char *pszExpression = NULL; char *pszTmp = NULL; if (!psFilterNode || !FLTIsLogicalFilterType(psFilterNode->pszValue)) return NULL; /* -------------------------------------------------------------------- */ /* OR and AND */ /* -------------------------------------------------------------------- */ if (psFilterNode->psLeftNode && psFilterNode->psRightNode) { pszTmp = FLTGetCommonExpression(psFilterNode->psLeftNode, lp); if (!pszTmp) return NULL; pszExpression = msStringConcatenate(pszExpression, "("); pszExpression = msStringConcatenate(pszExpression, pszTmp); msFree(pszTmp); pszExpression = msStringConcatenate(pszExpression, " "); pszExpression = msStringConcatenate(pszExpression, psFilterNode->pszValue); pszExpression = msStringConcatenate(pszExpression, " "); pszTmp = FLTGetCommonExpression(psFilterNode->psRightNode, lp); if (!pszTmp) { msFree(pszExpression); return NULL; } pszExpression = msStringConcatenate(pszExpression, pszTmp); msFree(pszTmp); pszExpression = msStringConcatenate(pszExpression, ")"); } /* -------------------------------------------------------------------- */ /* NOT */ /* 
-------------------------------------------------------------------- */ else if (psFilterNode->psLeftNode && strcasecmp(psFilterNode->pszValue, "NOT") == 0) { pszTmp = FLTGetCommonExpression(psFilterNode->psLeftNode, lp); if (!pszTmp) return NULL; pszExpression = msStringConcatenate(pszExpression, "(NOT "); pszExpression = msStringConcatenate(pszExpression, pszTmp); msFree(pszTmp); pszExpression = msStringConcatenate(pszExpression, ")"); } return pszExpression; } char *FLTGetSpatialComparisonCommonExpression(FilterEncodingNode *psNode, layerObj *lp) { char *pszExpression = NULL; shapeObj *psQueryShape = NULL; double dfDistance = -1; int nUnit = -1, nLayerUnit = -1; char *pszWktText = NULL; char szBuffer[256]; char *pszTmp=NULL; projectionObj sProjTmp; rectObj sQueryRect; shapeObj *psTmpShape=NULL; int bBBoxQuery = 0; int bAlreadyReprojected = 0; if (psNode == NULL || lp == NULL) return NULL; if (psNode->eType != FILTER_NODE_TYPE_SPATIAL) return NULL; /* get the shape */ if (FLTIsBBoxFilter(psNode)) { char szPolygon[512]; FLTGetBBOX(psNode, &sQueryRect); snprintf(szPolygon, sizeof(szPolygon), "POLYGON((%.18f %.18f,%.18f %.18f,%.18f %.18f,%.18f %.18f,%.18f %.18f))", sQueryRect.minx, sQueryRect.miny, sQueryRect.minx, sQueryRect.maxy, sQueryRect.maxx, sQueryRect.maxy, sQueryRect.maxx, sQueryRect.miny, sQueryRect.minx, sQueryRect.miny); psTmpShape = msShapeFromWKT(szPolygon); /* ** This is a horrible hack to deal with world-extent requests and ** reprojection. msProjectRect() detects reprojection from longlat to a ** projected SRS, and in that case it transforms the bbox to -1e15,-1e15,1e15,1e15 ** to ensure that all features are returned. ** ** Make wfs_200_cite_filter_bbox_world.xml and wfs_200_cite_postgis_bbox_world.xml pass */ if (fabs(sQueryRect.minx - -180.0) < 1e-5 && fabs(sQueryRect.miny - -90.0) < 1e-5 && fabs(sQueryRect.maxx - 180.0) < 1e-5 && fabs(sQueryRect.maxy - 90.0) < 1e-5) { if (lp->projection.numargs > 0) { if (psNode->pszSRS) msInitProjection(&sProjTmp); if (psNode->pszSRS) { /* Use the non EPSG variant since axis swapping is done in FLTDoAxisSwappingIfNecessary */ if (msLoadProjectionString(&sProjTmp, psNode->pszSRS) == 0) { msProjectRect(&sProjTmp, &lp->projection, &sQueryRect); } } else if (lp->map->projection.numargs > 0) msProjectRect(&lp->map->projection, &lp->projection, &sQueryRect); if (psNode->pszSRS) msFreeProjection(&sProjTmp); } if (sQueryRect.minx <= -1e14) { msFreeShape(psTmpShape); msFree(psTmpShape); psTmpShape = (shapeObj*) msSmallMalloc(sizeof(shapeObj)); msInitShape(psTmpShape); msRectToPolygon(sQueryRect, psTmpShape); bAlreadyReprojected = 1; } } bBBoxQuery = 1; } else { /* other geos type operations */ /* project shape to layer projection.
If the proj is not part of the filter query, assume that the coordinates are in the map projection */ psQueryShape = FLTGetShape(psNode, &dfDistance, &nUnit); if ((strcasecmp(psNode->pszValue, "DWithin") == 0 || strcasecmp(psNode->pszValue, "Beyond") == 0 ) && dfDistance > 0) { nLayerUnit = lp->units; if(nLayerUnit == -1) nLayerUnit = GetMapserverUnitUsingProj(&lp->projection); if(nLayerUnit == -1) nLayerUnit = lp->map->units; if(nLayerUnit == -1) nLayerUnit = GetMapserverUnitUsingProj(&lp->map->projection); if (nUnit >= 0 && nUnit != nLayerUnit) dfDistance *= msInchesPerUnit(nUnit,0)/msInchesPerUnit(nLayerUnit,0); /* target is layer units */ } psTmpShape = psQueryShape; } if (psTmpShape) { /* ** target is layer projection */ if (!bAlreadyReprojected && lp->projection.numargs > 0) { if (psNode->pszSRS) msInitProjection(&sProjTmp); if (psNode->pszSRS) { /* Use the non EPSG variant since axis swapping is done in FLTDoAxisSwappingIfNecessary */ if (msLoadProjectionString(&sProjTmp, psNode->pszSRS) == 0) { msProjectShape(&sProjTmp, &lp->projection, psTmpShape); } } else if (lp->map->projection.numargs > 0) msProjectShape(&lp->map->projection, &lp->projection, psTmpShape); if (psNode->pszSRS) msFreeProjection(&sProjTmp); } /* function name */ if (bBBoxQuery) { sprintf(szBuffer, "%s", "intersects"); } else { if (strncasecmp(psNode->pszValue, "intersect", 9) == 0) sprintf(szBuffer, "%s", "intersects"); else { pszTmp = msStrdup(psNode->pszValue); msStringToLower(pszTmp); sprintf(szBuffer, "%s", pszTmp); msFree(pszTmp); } } pszExpression = msStringConcatenate(pszExpression, szBuffer); pszExpression = msStringConcatenate(pszExpression, "("); /* geometry binding */ sprintf(szBuffer, "%s", "[shape]"); pszExpression = msStringConcatenate(pszExpression, szBuffer); pszExpression = msStringConcatenate(pszExpression, ","); /* filter geometry */ pszWktText = msGEOSShapeToWKT(psTmpShape); sprintf(szBuffer, "%s", "fromText('"); pszExpression = msStringConcatenate(pszExpression, szBuffer); pszExpression = msStringConcatenate(pszExpression, pszWktText); sprintf(szBuffer, "%s", "')"); pszExpression = msStringConcatenate(pszExpression, szBuffer); msGEOSFreeWKT(pszWktText); /* (optional) beyond/dwithin distance, converted to layer units above */ if ((strcasecmp(psNode->pszValue, "DWithin") == 0 || strcasecmp(psNode->pszValue, "Beyond") == 0)) { sprintf(szBuffer, ",%g", dfDistance); pszExpression = msStringConcatenate(pszExpression, szBuffer); } /* terminate the function */ pszExpression = msStringConcatenate(pszExpression, ") = TRUE"); } /* ** Cleanup */ if (bBBoxQuery) { msFreeShape(psTmpShape); msFree(psTmpShape); } return pszExpression; } char *FLTGetFeatureIdCommonExpression(FilterEncodingNode *psFilterNode, layerObj *lp) { char *pszExpression = NULL; int nTokens = 0, i=0, bString=0; char **tokens = NULL; const char *pszAttribute=NULL; #if defined(USE_WMS_SVR) || defined(USE_WFS_SVR) || defined(USE_WCS_SVR) || defined(USE_SOS_SVR) if (psFilterNode->pszValue) { pszAttribute = msOWSLookupMetadata(&(lp->metadata), "OFG", "featureid"); if (pszAttribute) { tokens = msStringSplit(psFilterNode->pszValue,',', &nTokens); if (tokens && nTokens > 0) { for (i=0; i<nTokens; i++) { char *pszTmp = NULL; int bufferSize = 0; const char* pszId = tokens[i]; const char* pszDot = strchr(pszId, '.'); if( pszDot ) pszId = pszDot + 1; if (i == 0) { if(FLTIsNumeric(pszId) == MS_FALSE) bString = 1; } if (bString) { bufferSize =
11+strlen(pszId)+strlen(pszAttribute)+1; pszTmp = (char *)msSmallMalloc(bufferSize); snprintf(pszTmp, bufferSize, "(\"[%s]\" ==\"%s\")" , pszAttribute, pszId); } else { bufferSize = 8+strlen(pszId)+strlen(pszAttribute)+1; pszTmp = (char *)msSmallMalloc(bufferSize); snprintf(pszTmp, bufferSize, "([%s] == %s)" , pszAttribute, pszId); } if (pszExpression != NULL) pszExpression = msStringConcatenate(pszExpression, " OR "); else pszExpression = msStringConcatenate(pszExpression, "("); pszExpression = msStringConcatenate(pszExpression, pszTmp); msFree(pszTmp); } msFreeCharArray(tokens, nTokens); } } /* opening and closing brackets are needed for mapserver expressions */ if (pszExpression) pszExpression = msStringConcatenate(pszExpression, ")"); } #endif return pszExpression; } char* FLTGetTimeExpression(FilterEncodingNode *psFilterNode, layerObj *lp) { char* pszExpression = NULL; const char* pszTimeField; const char* pszTimeValue; if (psFilterNode == NULL || lp == NULL) return NULL; if (psFilterNode->eType != FILTER_NODE_TYPE_TEMPORAL) return NULL; pszTimeValue = FLTGetDuring(psFilterNode, &pszTimeField); if (pszTimeField && pszTimeValue) { expressionObj old_filter; msInitExpression(&old_filter); msCopyExpression(&old_filter, &lp->filter); /* save existing filter */ msFreeExpression(&lp->filter); if (msLayerSetTimeFilter(lp, pszTimeValue, pszTimeField) == MS_TRUE) { pszExpression = msStrdup(lp->filter.string); } msCopyExpression(&lp->filter, &old_filter); /* restore old filter */ msFreeExpression(&old_filter); } return pszExpression; } char *FLTGetCommonExpression(FilterEncodingNode *psFilterNode, layerObj *lp) { char *pszExpression = NULL; if (!psFilterNode) return NULL; if (psFilterNode->eType == FILTER_NODE_TYPE_COMPARISON) { if ( psFilterNode->psLeftNode && psFilterNode->psRightNode) { if (FLTIsBinaryComparisonFilterType(psFilterNode->pszValue)) pszExpression = FLTGetBinaryComparisonCommonExpression(psFilterNode, lp); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsLike") == 0) pszExpression = FLTGetIsLikeComparisonCommonExpression(psFilterNode); else if (strcasecmp(psFilterNode->pszValue, "PropertyIsBetween") == 0) pszExpression = FLTGetIsBetweenComparisonCommonExpresssion(psFilterNode, lp); } } else if (psFilterNode->eType == FILTER_NODE_TYPE_LOGICAL) { pszExpression = FLTGetLogicalComparisonCommonExpression(psFilterNode, lp); } else if (psFilterNode->eType == FILTER_NODE_TYPE_SPATIAL) { pszExpression = FLTGetSpatialComparisonCommonExpression(psFilterNode, lp); } else if (psFilterNode->eType == FILTER_NODE_TYPE_FEATUREID) { pszExpression = FLTGetFeatureIdCommonExpression(psFilterNode, lp); } else if (psFilterNode->eType == FILTER_NODE_TYPE_TEMPORAL) { pszExpression = FLTGetTimeExpression(psFilterNode, lp); } return pszExpression; } int FLTApplyFilterToLayerCommonExpression(mapObj *map, int iLayerIndex, const char *pszExpression) { return FLTApplyFilterToLayerCommonExpressionWithRect(map, iLayerIndex, pszExpression, map->extent); } /* rect must be in map->projection */ int FLTApplyFilterToLayerCommonExpressionWithRect(mapObj *map, int iLayerIndex, const char *pszExpression, rectObj rect) { int retval; int save_startindex; int save_maxfeatures; int save_only_cache_result_count; save_startindex = map->query.startindex; save_maxfeatures = map->query.maxfeatures; save_only_cache_result_count = map->query.only_cache_result_count; msInitQuery(&(map->query)); map->query.startindex = save_startindex; map->query.maxfeatures = save_maxfeatures; map->query.only_cache_result_count = 
save_only_cache_result_count; map->query.mode = MS_QUERY_MULTIPLE; map->query.layer = iLayerIndex; map->query.rect = rect; if( pszExpression ) { map->query.type = MS_QUERY_BY_FILTER; msInitExpression(&map->query.filter); map->query.filter.string = msStrdup(pszExpression); map->query.filter.type = MS_EXPRESSION; /* a logical expression */ retval = msQueryByFilter(map); } else { map->query.type = MS_QUERY_BY_RECT; retval = msQueryByRect(map); } return retval; } #endif
./CrossVul/dataset_final_sorted/CWE-119/c/good_3111_1
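/*
 * Editor's note: the block below is an added usage sketch, not part of the
 * MapServer source above. It assumes a FilterEncodingNode tree already
 * produced by the OGC filter parser; apply_ogc_filter() is a hypothetical
 * wrapper name, while FLTGetCommonExpression(),
 * FLTApplyFilterToLayerCommonExpression(), GET_LAYER() and msFree() come
 * from the code and headers above. For reference, the PropertyIsLike
 * translation above turns a pattern such as abc*d? (wildCard '*',
 * singleChar '?') into the expression ("[attr]" ~ "^abc.*d.").
 */
#ifdef USE_OGR
static int apply_ogc_filter(mapObj *map, int iLayerIndex, FilterEncodingNode *psRoot)
{
  layerObj *lp = GET_LAYER(map, iLayerIndex);
  char *pszExpression = FLTGetCommonExpression(psRoot, lp);
  int status;

  if (pszExpression == NULL)
    return MS_FAILURE; /* filter could not be translated */

  /* runs msQueryByFilter() on the layer using the translated expression */
  status = FLTApplyFilterToLayerCommonExpression(map, iLayerIndex, pszExpression);
  msFree(pszExpression);
  return status;
}
#endif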
crossvul-cpp_data_bad_5654_0
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* Escape and unescape URL encoding in strings. The functions return a new * allocated string or NULL if an error occurred. */ #include "curl_setup.h" #include <curl/curl.h> #include "curl_memory.h" #include "urldata.h" #include "warnless.h" #include "non-ascii.h" #include "escape.h" #define _MPRINTF_REPLACE /* use our functions only */ #include <curl/mprintf.h> /* The last #include file should be: */ #include "memdebug.h" /* Portable character check (remember EBCDIC). Do not use isalnum() because its behavior is altered by the current locale. See http://tools.ietf.org/html/rfc3986#section-2.3 */ static bool Curl_isunreserved(unsigned char in) { switch (in) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '-': case '.': case '_': case '~': return TRUE; default: break; } return FALSE; } /* for ABI-compatibility with previous versions */ char *curl_escape(const char *string, int inlength) { return curl_easy_escape(NULL, string, inlength); } /* for ABI-compatibility with previous versions */ char *curl_unescape(const char *string, int length) { return curl_easy_unescape(NULL, string, length, NULL); } char *curl_easy_escape(CURL *handle, const char *string, int inlength) { size_t alloc = (inlength?(size_t)inlength:strlen(string))+1; char *ns; char *testing_ptr = NULL; unsigned char in; /* we need to treat the characters unsigned */ size_t newlen = alloc; size_t strindex=0; size_t length; CURLcode res; ns = malloc(alloc); if(!ns) return NULL; length = alloc-1; while(length--) { in = *string; if(Curl_isunreserved(in)) /* just copy this */ ns[strindex++]=in; else { /* encode it */ newlen += 2; /* the size grows with two, since this'll become a %XX */ if(newlen > alloc) { alloc *= 2; testing_ptr = realloc(ns, alloc); if(!testing_ptr) { free( ns ); return NULL; } else { ns = testing_ptr; } } res = Curl_convert_to_network(handle, &in, 1); if(res) { /* Curl_convert_to_network calls failf if unsuccessful */ free(ns); return NULL; } snprintf(&ns[strindex], 4, "%%%02X", in); strindex+=3; } string++; } ns[strindex]=0; /* terminate it */ return ns; } /* * Curl_urldecode() URL decodes the 
given string. * * Optionally detects control characters (byte codes lower than 32) in the * data and rejects such data. * * Returns a pointer to a malloced string in *ostring with length given in * *olen. If length == 0, the length is assumed to be strlen(string). * */ CURLcode Curl_urldecode(struct SessionHandle *data, const char *string, size_t length, char **ostring, size_t *olen, bool reject_ctrl) { size_t alloc = (length?length:strlen(string))+1; char *ns = malloc(alloc); unsigned char in; size_t strindex=0; unsigned long hex; CURLcode res; if(!ns) return CURLE_OUT_OF_MEMORY; while(--alloc > 0) { in = *string; if(('%' == in) && ISXDIGIT(string[1]) && ISXDIGIT(string[2])) { /* this is two hexadecimal digits following a '%' */ char hexstr[3]; char *ptr; hexstr[0] = string[1]; hexstr[1] = string[2]; hexstr[2] = 0; hex = strtoul(hexstr, &ptr, 16); in = curlx_ultouc(hex); /* this long is never bigger than 255 anyway */ res = Curl_convert_from_network(data, &in, 1); if(res) { /* Curl_convert_from_network calls failf if unsuccessful */ free(ns); return res; } string+=2; alloc-=2; } if(reject_ctrl && (in < 0x20)) { free(ns); return CURLE_URL_MALFORMAT; } ns[strindex++] = in; string++; } ns[strindex]=0; /* terminate it */ if(olen) /* store output size */ *olen = strindex; if(ostring) /* store output string */ *ostring = ns; return CURLE_OK; } /* * Unescapes the given URL escaped string of given length. Returns a * pointer to a malloced string with length given in *olen. * If length == 0, the length is assumed to be strlen(string). * If olen == NULL, no output length is stored. */ char *curl_easy_unescape(CURL *handle, const char *string, int length, int *olen) { char *str = NULL; size_t inputlen = length; size_t outputlen; CURLcode res = Curl_urldecode(handle, string, inputlen, &str, &outputlen, FALSE); if(res) return NULL; if(olen) *olen = curlx_uztosi(outputlen); return str; } /* For operating systems/environments that use different malloc/free systems for the app and for this library, we provide a free that uses the library's memory system */ void curl_free(void *p) { if(p) free(p); }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5654_0
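/*
 * Editor's note: an added, self-contained usage sketch for the public
 * escape API above; it is not part of libcurl itself. A NULL easy handle
 * is valid, as the ABI-compatibility wrappers curl_escape()/curl_unescape()
 * show, and a length of 0 makes both functions call strlen() on the input.
 */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  char *enc = curl_easy_escape(NULL, "a b/c", 0);
  if(enc) {
    int declen = 0;
    char *dec = curl_easy_unescape(NULL, enc, 0, &declen);
    printf("escaped: %s\n", enc); /* prints "a%20b%2Fc" */
    if(dec) {
      printf("unescaped (%d bytes): %s\n", declen, dec);
      curl_free(dec); /* pair with curl_free(), not free() */
    }
    curl_free(enc);
  }
  return 0;
}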
crossvul-cpp_data_good_342_10
/* * util.c: utility functions used by OpenSC command line tools. * * Copyright (C) 2011 OpenSC Project developers * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #ifndef _WIN32 #include <termios.h> #else #include <conio.h> #endif #include <ctype.h> #include "util.h" #include "ui/notify.h" int is_string_valid_atr(const char *atr_str) { unsigned char atr[SC_MAX_ATR_SIZE]; size_t atr_len = sizeof(atr); if (sc_hex_to_bin(atr_str, atr, &atr_len)) return 0; if (atr_len < 2) return 0; if (atr[0] != 0x3B && atr[0] != 0x3F) return 0; return 1; } int util_connect_card_ex(sc_context_t *ctx, sc_card_t **cardp, const char *reader_id, int do_wait, int do_lock, int verbose) { struct sc_reader *reader = NULL, *found = NULL; struct sc_card *card = NULL; int r; sc_notify_init(); if (do_wait) { unsigned int event; if (sc_ctx_get_reader_count(ctx) == 0) { fprintf(stderr, "Waiting for a reader to be attached...\n"); r = sc_wait_for_event(ctx, SC_EVENT_READER_ATTACHED, &found, &event, -1, NULL); if (r < 0) { fprintf(stderr, "Error while waiting for a reader: %s\n", sc_strerror(r)); return 3; } r = sc_ctx_detect_readers(ctx); if (r < 0) { fprintf(stderr, "Error while refreshing readers: %s\n", sc_strerror(r)); return 3; } } fprintf(stderr, "Waiting for a card to be inserted...\n"); r = sc_wait_for_event(ctx, SC_EVENT_CARD_INSERTED, &found, &event, -1, NULL); if (r < 0) { fprintf(stderr, "Error while waiting for a card: %s\n", sc_strerror(r)); return 3; } reader = found; } else if (sc_ctx_get_reader_count(ctx) == 0) { fprintf(stderr, "No smart card readers found.\n"); return 1; } else { if (!reader_id) { unsigned int i; /* Automatically try to skip to a reader with a card if reader not specified */ for (i = 0; i < sc_ctx_get_reader_count(ctx); i++) { reader = sc_ctx_get_reader(ctx, i); if (sc_detect_card_presence(reader) & SC_READER_CARD_PRESENT) { fprintf(stderr, "Using reader with a card: %s\n", reader->name); goto autofound; } } /* If no reader had a card, default to the first reader */ reader = sc_ctx_get_reader(ctx, 0); } else { /* If the reader identifier looks like an ATR, try to find the reader with that card */ if (is_string_valid_atr(reader_id)) { unsigned char atr_buf[SC_MAX_ATR_SIZE]; size_t atr_buf_len = sizeof(atr_buf); unsigned int i; sc_hex_to_bin(reader_id, atr_buf, &atr_buf_len); /* Loop readers, looking for a card with ATR */ for (i = 0; i < sc_ctx_get_reader_count(ctx); i++) { struct sc_reader *rdr = sc_ctx_get_reader(ctx, i); if (!(sc_detect_card_presence(rdr) & SC_READER_CARD_PRESENT)) continue; else if (rdr->atr.len != atr_buf_len) continue; else if (memcmp(rdr->atr.value, atr_buf, rdr->atr.len)) continue; fprintf(stderr, "Matched ATR in reader: %s\n", rdr->name); reader = rdr; goto autofound; } } else { char *endptr = NULL; unsigned int num; errno 
= 0; num = strtol(reader_id, &endptr, 0); if (!errno && endptr && *endptr == '\0') reader = sc_ctx_get_reader(ctx, num); else reader = sc_ctx_get_reader_by_name(ctx, reader_id); } } autofound: if (!reader) { fprintf(stderr, "Reader \"%s\" not found (%d reader(s) detected)\n", reader_id, sc_ctx_get_reader_count(ctx)); return 1; } if (sc_detect_card_presence(reader) <= 0) { fprintf(stderr, "Card not present.\n"); return 3; } } if (verbose) printf("Connecting to card in reader %s...\n", reader->name); r = sc_connect_card(reader, &card); if (r < 0) { fprintf(stderr, "Failed to connect to card: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Using card driver %s.\n", card->driver->name); if (do_lock) { r = sc_lock(card); if (r < 0) { fprintf(stderr, "Failed to lock card: %s\n", sc_strerror(r)); sc_disconnect_card(card); return 1; } } *cardp = card; return 0; } int util_connect_card(sc_context_t *ctx, sc_card_t **cardp, const char *reader_id, int do_wait, int verbose) { return util_connect_card_ex(ctx, cardp, reader_id, do_wait, 1, verbose); } void util_print_binary(FILE *f, const u8 *buf, int count) { int i; for (i = 0; i < count; i++) { unsigned char c = buf[i]; const char *format; if (!isprint(c)) format = "\\x%02X"; else format = "%c"; fprintf(f, format, c); } (void) fflush(f); } void util_hex_dump(FILE *f, const u8 *in, int len, const char *sep) { int i; for (i = 0; i < len; i++) { if (sep != NULL && i) fprintf(f, "%s", sep); fprintf(f, "%02X", in[i]); } } void util_hex_dump_asc(FILE *f, const u8 *in, size_t count, int addr) { int lines = 0; while (count) { char ascbuf[17]; size_t i; if (addr >= 0) { fprintf(f, "%08X: ", addr); addr += 16; } for (i = 0; i < count && i < 16; i++) { fprintf(f, "%02X ", *in); if (isprint(*in)) ascbuf[i] = *in; else ascbuf[i] = '.'; in++; } count -= i; ascbuf[i] = 0; for (; i < 16 && lines; i++) fprintf(f, " "); fprintf(f, "%s\n", ascbuf); lines++; } } NORETURN void util_print_usage_and_die(const char *app_name, const struct option options[], const char *option_help[], const char *args) { int i; int header_shown = 0; if (args) printf("Usage: %s [OPTIONS] %s\n", app_name, args); else printf("Usage: %s [OPTIONS]\n", app_name); for (i = 0; options[i].name; i++) { char buf[40]; const char *arg_str; /* Skip "hidden" options */ if (option_help[i] == NULL) continue; if (!header_shown++) printf("Options:\n"); switch (options[i].has_arg) { case 1: arg_str = " <arg>"; break; case 2: arg_str = " [arg]"; break; default: arg_str = ""; break; } if (isascii(options[i].val) && isprint(options[i].val) && !isspace(options[i].val)) sprintf(buf, "-%c, --%s%s", options[i].val, options[i].name, arg_str); else sprintf(buf, " --%s%s", options[i].name, arg_str); /* print the line - wrap if necessary */ if (strlen(buf) > 28) { printf(" %s\n", buf); buf[0] = '\0'; } printf(" %-28s %s\n", buf, option_help[i]); } exit(2); } const char * util_acl_to_str(const sc_acl_entry_t *e) { static char line[80], buf[20]; unsigned int acl; if (e == NULL) return "N/A"; line[0] = 0; while (e != NULL) { acl = e->method; switch (acl) { case SC_AC_UNKNOWN: return "N/A"; case SC_AC_NEVER: return "NEVR"; case SC_AC_NONE: return "NONE"; case SC_AC_CHV: strcpy(buf, "CHV"); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "%d", e->key_ref); break; case SC_AC_TERM: strcpy(buf, "TERM"); break; case SC_AC_PRO: strcpy(buf, "PROT"); break; case SC_AC_AUT: strcpy(buf, "AUTH"); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 4, "%d", e->key_ref); break; case SC_AC_SEN: strcpy(buf, "Sec.Env. 
"); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "#%d", e->key_ref); break; case SC_AC_SCB: strcpy(buf, "Sec.ControlByte "); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "Ox%X", e->key_ref); break; case SC_AC_IDA: strcpy(buf, "PKCS#15 AuthID "); if (e->key_ref != SC_AC_KEY_REF_NONE) sprintf(buf + 3, "#%d", e->key_ref); break; default: strcpy(buf, "????"); break; } strncat(line, buf, sizeof line); strncat(line, " ", sizeof line); e = e->next; } line[(sizeof line)-1] = '\0'; /* make sure it's NUL terminated */ line[strlen(line)-1] = 0; /* get rid of trailing space */ return line; } NORETURN void util_fatal(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "error: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\nAborting.\n"); va_end(ap); sc_notify_close(); exit(1); } void util_error(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "error: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); } void util_warn(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "warning: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); } int util_getpass (char **lineptr, size_t *len, FILE *stream) { #define MAX_PASS_SIZE 128 char *buf; size_t i; int ch = 0; #ifndef _WIN32 struct termios old, new; fflush(stdout); if (tcgetattr (fileno (stdout), &old) != 0) return -1; new = old; new.c_lflag &= ~ECHO; if (tcsetattr (fileno (stdout), TCSAFLUSH, &new) != 0) return -1; #endif buf = calloc(1, MAX_PASS_SIZE); if (!buf) return -1; for (i = 0; i < MAX_PASS_SIZE - 1; i++) { #ifndef _WIN32 ch = getchar(); #else ch = _getch(); #endif if (ch == 0 || ch == 3) break; if (ch == '\n' || ch == '\r') break; buf[i] = (char) ch; } #ifndef _WIN32 tcsetattr (fileno (stdout), TCSAFLUSH, &old); fputs("\n", stdout); #endif if (ch == 0 || ch == 3) { free(buf); return -1; } if (*lineptr && (!len || *len < i+1)) { free(*lineptr); *lineptr = NULL; } if (*lineptr) { memcpy(*lineptr,buf,i+1); memset(buf, 0, MAX_PASS_SIZE); free(buf); } else { *lineptr = buf; if (len) *len = MAX_PASS_SIZE; } return i; } size_t util_get_pin(const char *input, const char **pin) { size_t inputlen = strlen(input); size_t pinlen = 0; if(inputlen > 4 && strncasecmp(input, "env:", 4) == 0) { // Get a PIN from a environment variable *pin = getenv(input + 4); pinlen = *pin ? strlen(*pin) : 0; } else { //Just use the input *pin = input; pinlen = inputlen; } return pinlen; }
./CrossVul/dataset_final_sorted/CWE-119/c/good_342_10
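/*
 * Editor's note: an added usage sketch for the helpers above, not part of
 * the original util.c. dump_atr() is a hypothetical tool function. It
 * relies on util_connect_card() locking the card (it calls the _ex variant
 * with do_lock=1), so the card must be unlocked before disconnecting.
 */
static int dump_atr(sc_context_t *ctx)
{
  sc_card_t *card = NULL;
  int r = util_connect_card(ctx, &card, NULL /* auto-pick a reader */, 0 /* no wait */, 1 /* verbose */);
  if (r != 0)
    return r;
  /* a negative addr suppresses the address column of the dump */
  util_hex_dump_asc(stdout, card->atr.value, card->atr.len, -1);
  sc_unlock(card);
  sc_disconnect_card(card);
  return 0;
}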
crossvul-cpp_data_bad_345_6
/* * pkcs15-sc-hsm.c : Initialize PKCS#15 emulation * * Copyright (C) 2012 Andreas Schwier, CardContact, Minden, Germany * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #include "asn1.h" #include "common/compat_strlcpy.h" #include "common/compat_strnlen.h" #include "card-sc-hsm.h" extern struct sc_aid sc_hsm_aid; void sc_hsm_set_serialnr(sc_card_t *card, char *serial); static struct ec_curve curves[] = { { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x01", 8}, // secp192r1 aka prime192r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 24}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 24}, { (unsigned char *) "\x64\x21\x05\x19\xE5\x9C\x80\xE7\x0F\xA7\xE9\xAB\x72\x24\x30\x49\xFE\xB8\xDE\xEC\xC1\x46\xB9\xB1", 24}, { (unsigned char *) "\x04\x18\x8D\xA8\x0E\xB0\x30\x90\xF6\x7C\xBF\x20\xEB\x43\xA1\x88\x00\xF4\xFF\x0A\xFD\x82\xFF\x10\x12\x07\x19\x2B\x95\xFF\xC8\xDA\x78\x63\x10\x11\xED\x6B\x24\xCD\xD5\x73\xF9\x77\xA1\x1E\x79\x48\x11", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x99\xDE\xF8\x36\x14\x6B\xC9\xB1\xB4\xD2\x28\x31", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x07", 8}, // secp256r1 aka prime256r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 32}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 32}, { (unsigned char *) "\x5A\xC6\x35\xD8\xAA\x3A\x93\xE7\xB3\xEB\xBD\x55\x76\x98\x86\xBC\x65\x1D\x06\xB0\xCC\x53\xB0\xF6\x3B\xCE\x3C\x3E\x27\xD2\x60\x4B", 32}, { (unsigned char *) "\x04\x6B\x17\xD1\xF2\xE1\x2C\x42\x47\xF8\xBC\xE6\xE5\x63\xA4\x40\xF2\x77\x03\x7D\x81\x2D\xEB\x33\xA0\xF4\xA1\x39\x45\xD8\x98\xC2\x96\x4F\xE3\x42\xE2\xFE\x1A\x7F\x9B\x8E\xE7\xEB\x4A\x7C\x0F\x9E\x16\x2B\xCE\x33\x57\x6B\x31\x5E\xCE\xCB\xB6\x40\x68\x37\xBF\x51\xF5", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xBC\xE6\xFA\xAD\xA7\x17\x9E\x84\xF3\xB9\xCA\xC2\xFC\x63\x25\x51", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x03", 9}, // brainpoolP192r1 { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x30\x93\xD1\x8D\xB7\x8F\xCE\x47\x6D\xE1\xA8\x62\x97", 24}, { (unsigned char *) "\x6A\x91\x17\x40\x76\xB1\xE0\xE1\x9C\x39\xC0\x31\xFE\x86\x85\xC1\xCA\xE0\x40\xE5\xC6\x9A\x28\xEF", 24}, { (unsigned char *) 
"\x46\x9A\x28\xEF\x7C\x28\xCC\xA3\xDC\x72\x1D\x04\x4F\x44\x96\xBC\xCA\x7E\xF4\x14\x6F\xBF\x25\xC9", 24}, { (unsigned char *) "\x04\xC0\xA0\x64\x7E\xAA\xB6\xA4\x87\x53\xB0\x33\xC5\x6C\xB0\xF0\x90\x0A\x2F\x5C\x48\x53\x37\x5F\xD6\x14\xB6\x90\x86\x6A\xBD\x5B\xB8\x8B\x5F\x48\x28\xC1\x49\x00\x02\xE6\x77\x3F\xA2\xFA\x29\x9B\x8F", 49}, { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x2F\x9E\x9E\x91\x6B\x5B\xE8\xF1\x02\x9A\xC4\xAC\xC1", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x05", 9}, // brainpoolP224r1 { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD1\xD7\x87\xB0\x9F\x07\x57\x97\xDA\x89\xF5\x7E\xC8\xC0\xFF", 28}, { (unsigned char *) "\x68\xA5\xE6\x2C\xA9\xCE\x6C\x1C\x29\x98\x03\xA6\xC1\x53\x0B\x51\x4E\x18\x2A\xD8\xB0\x04\x2A\x59\xCA\xD2\x9F\x43", 28}, { (unsigned char *) "\x25\x80\xF6\x3C\xCF\xE4\x41\x38\x87\x07\x13\xB1\xA9\x23\x69\xE3\x3E\x21\x35\xD2\x66\xDB\xB3\x72\x38\x6C\x40\x0B", 28}, { (unsigned char *) "\x04\x0D\x90\x29\xAD\x2C\x7E\x5C\xF4\x34\x08\x23\xB2\xA8\x7D\xC6\x8C\x9E\x4C\xE3\x17\x4C\x1E\x6E\xFD\xEE\x12\xC0\x7D\x58\xAA\x56\xF7\x72\xC0\x72\x6F\x24\xC6\xB8\x9E\x4E\xCD\xAC\x24\x35\x4B\x9E\x99\xCA\xA3\xF6\xD3\x76\x14\x02\xCD", 57}, { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD0\xFB\x98\xD1\x16\xBC\x4B\x6D\xDE\xBC\xA3\xA5\xA7\x93\x9F", 28}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x07", 9}, // brainpoolP256r1 { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x72\x6E\x3B\xF6\x23\xD5\x26\x20\x28\x20\x13\x48\x1D\x1F\x6E\x53\x77", 32}, { (unsigned char *) "\x7D\x5A\x09\x75\xFC\x2C\x30\x57\xEE\xF6\x75\x30\x41\x7A\xFF\xE7\xFB\x80\x55\xC1\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9", 32}, { (unsigned char *) "\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9\xBB\xD7\x7C\xBF\x95\x84\x16\x29\x5C\xF7\xE1\xCE\x6B\xCC\xDC\x18\xFF\x8C\x07\xB6", 32}, { (unsigned char *) "\x04\x8B\xD2\xAE\xB9\xCB\x7E\x57\xCB\x2C\x4B\x48\x2F\xFC\x81\xB7\xAF\xB9\xDE\x27\xE1\xE3\xBD\x23\xC2\x3A\x44\x53\xBD\x9A\xCE\x32\x62\x54\x7E\xF8\x35\xC3\xDA\xC4\xFD\x97\xF8\x46\x1A\x14\x61\x1D\xC9\xC2\x77\x45\x13\x2D\xED\x8E\x54\x5C\x1D\x54\xC7\x2F\x04\x69\x97", 65}, { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x71\x8C\x39\x7A\xA3\xB5\x61\xA6\xF7\x90\x1E\x0E\x82\x97\x48\x56\xA7", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x09", 9}, // brainpoolP320r1 { (unsigned char *) "\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA6\xF6\xF4\x0D\xEF\x4F\x92\xB9\xEC\x78\x93\xEC\x28\xFC\xD4\x12\xB1\xF1\xB3\x2E\x27", 40}, { (unsigned char *) "\x3E\xE3\x0B\x56\x8F\xBA\xB0\xF8\x83\xCC\xEB\xD4\x6D\x3F\x3B\xB8\xA2\xA7\x35\x13\xF5\xEB\x79\xDA\x66\x19\x0E\xB0\x85\xFF\xA9\xF4\x92\xF3\x75\xA9\x7D\x86\x0E\xB4", 40}, { (unsigned char *) "\x52\x08\x83\x94\x9D\xFD\xBC\x42\xD3\xAD\x19\x86\x40\x68\x8A\x6F\xE1\x3F\x41\x34\x95\x54\xB4\x9A\xCC\x31\xDC\xCD\x88\x45\x39\x81\x6F\x5E\xB4\xAC\x8F\xB1\xF1\xA6", 40}, { (unsigned char *) "\x04\x43\xBD\x7E\x9A\xFB\x53\xD8\xB8\x52\x89\xBC\xC4\x8E\xE5\xBF\xE6\xF2\x01\x37\xD1\x0A\x08\x7E\xB6\xE7\x87\x1E\x2A\x10\xA5\x99\xC7\x10\xAF\x8D\x0D\x39\xE2\x06\x11\x14\xFD\xD0\x55\x45\xEC\x1C\xC8\xAB\x40\x93\x24\x7F\x77\x27\x5E\x07\x43\xFF\xED\x11\x71\x82\xEA\xA9\xC7\x78\x77\xAA\xAC\x6A\xC7\xD3\x52\x45\xD1\x69\x2E\x8E\xE1", 81}, { (unsigned char *) 
"\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA5\xB6\x8F\x12\xA3\x2D\x48\x2E\xC7\xEE\x86\x58\xE9\x86\x91\x55\x5B\x44\xC5\x93\x11", 40}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x1F", 5}, // secp192k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xEE\x37", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03", 24}, { (unsigned char *) "\x04\xDB\x4F\xF1\x0E\xC0\x57\xE9\xAE\x26\xB0\x7D\x02\x80\xB7\xF4\x34\x1D\xA5\xD1\xB1\xEA\xE0\x6C\x7D\x9B\x2F\x2F\x6D\x9C\x56\x28\xA7\x84\x41\x63\xD0\x15\xBE\x86\x34\x40\x82\xAA\x88\xD9\x5E\x2F\x9D", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\x26\xF2\xFC\x17\x0F\x69\x46\x6A\x74\xDE\xFD\x8D", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x0A", 5}, // secp256k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFC\x2F", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07", 32}, { (unsigned char *) "\x04\x79\xBE\x66\x7E\xF9\xDC\xBB\xAC\x55\xA0\x62\x95\xCE\x87\x0B\x07\x02\x9B\xFC\xDB\x2D\xCE\x28\xD9\x59\xF2\x81\x5B\x16\xF8\x17\x98\x48\x3A\xDA\x77\x26\xA3\xC4\x65\x5D\xA4\xFB\xFC\x0E\x11\x08\xA8\xFD\x17\xB4\x48\xA6\x85\x54\x19\x9C\x47\xD0\x8F\xFB\x10\xD4\xB8", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xBA\xAE\xDC\xE6\xAF\x48\xA0\x3B\xBF\xD2\x5E\x8C\xD0\x36\x41\x41", 32}, { (unsigned char *) "\x01", 1} }, { { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0} } }; #define C_ASN1_CVC_PUBKEY_SIZE 10 static const struct sc_asn1_entry c_asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE] = { { "publicKeyOID", SC_ASN1_OBJECT, SC_ASN1_UNI | SC_ASN1_OBJECT, 0, NULL, NULL }, { "primeOrModulus", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 1, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientAorExponent", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 2, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientB", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 3, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "basePointG", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 4, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "order", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 5, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "publicPoint", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 6, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "cofactor", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 7, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "modulusSize", SC_ASN1_INTEGER, SC_ASN1_UNI | SC_ASN1_INTEGER, SC_ASN1_OPTIONAL, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_BODY_SIZE 5 static const struct sc_asn1_entry c_asn1_cvc_body[C_ASN1_CVC_BODY_SIZE] = { { "certificateProfileIdentifier", SC_ASN1_INTEGER, SC_ASN1_APP | 0x1F29, 0, NULL, NULL }, { "certificationAuthorityReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "publicKey", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F49, 0, 
NULL, NULL }, { "certificateHolderReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 0x1F20, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVCERT_SIZE 3 static const struct sc_asn1_entry c_asn1_cvcert[C_ASN1_CVCERT_SIZE] = { { "certificateBody", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F4E, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_SIZE 2 static const struct sc_asn1_entry c_asn1_cvc[C_ASN1_CVC_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_AUTHREQ_SIZE 4 static const struct sc_asn1_entry c_asn1_authreq[C_ASN1_AUTHREQ_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { "outerCAR", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_REQ_SIZE 2 static const struct sc_asn1_entry c_asn1_req[C_ASN1_REQ_SIZE] = { { "authenticatedrequest", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 7, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; static int read_file(sc_pkcs15_card_t * p15card, u8 fid[2], u8 *efbin, size_t *len, int optional) { sc_path_t path; int r; sc_path_set(&path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); /* look this up with our AID */ path.aid = sc_hsm_aid; /* we don't have a pre-known size of the file */ path.count = -1; if (!p15card->opts.use_file_cache || !efbin || SC_SUCCESS != sc_pkcs15_read_cached_file(p15card, &path, &efbin, len)) { /* avoid re-selection of SC-HSM */ path.aid.len = 0; r = sc_select_file(p15card->card, &path, NULL); if (r < 0) { sc_log(p15card->card->ctx, "Could not select EF"); } else { r = sc_read_binary(p15card->card, 0, efbin, *len, 0); } if (r < 0) { sc_log(p15card->card->ctx, "Could not read EF"); if (!optional) { return r; } /* optional files are saved as empty files to avoid card * transactions. Parsing the file's data will reveal that they were * missing. */ *len = 0; } else { *len = r; } if (p15card->opts.use_file_cache) { /* save this with our AID */ path.aid = sc_hsm_aid; sc_pkcs15_cache_file(p15card, &path, efbin, *len); } } return SC_SUCCESS; } /* * Decode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_decode_cvc(sc_pkcs15_card_t * p15card, const u8 ** buf, size_t *buflen, sc_cvc_t *cvc) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_req[C_ASN1_REQ_SIZE]; struct sc_asn1_entry asn1_authreq[C_ASN1_AUTHREQ_SIZE]; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; unsigned int cla,tag; size_t taglen; size_t lenchr = sizeof(cvc->chr); size_t lencar = sizeof(cvc->car); size_t lenoutercar = sizeof(cvc->outer_car); const u8 *tbuf; int r; memset(cvc, 0, sizeof(*cvc)); sc_copy_asn1_entry(c_asn1_req, asn1_req); sc_copy_asn1_entry(c_asn1_authreq, asn1_authreq); sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 1, &cvc->primeOrModulus, &cvc->primeOrModuluslen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 2, &cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 3, &cvc->coefficientB, &cvc->coefficientBlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 4, &cvc->basePointG, &cvc->basePointGlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 5, &cvc->order, &cvc->orderlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 6, &cvc->publicPoint, &cvc->publicPointlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 7, &cvc->cofactor, &cvc->cofactorlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 0); sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 0); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 0); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 0); sc_format_asn1_entry(asn1_cvcert + 1, &cvc->signature, &cvc->signatureLen, 0); sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq + 1, &cvc->outer_car, &lenoutercar, 0); sc_format_asn1_entry(asn1_authreq + 2, &cvc->outerSignature, &cvc->outerSignatureLen, 0); sc_format_asn1_entry(asn1_req , &asn1_authreq, NULL, 0); /* sc_asn1_print_tags(*buf, *buflen); */ tbuf = *buf; r = sc_asn1_read_tag(&tbuf, *buflen, &cla, &tag, &taglen); LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); /* Determine if we deal with an authenticated request, plain request or certificate */ if ((cla == (SC_ASN1_TAG_APPLICATION|SC_ASN1_TAG_CONSTRUCTED)) && (tag == 7)) { r = sc_asn1_decode(card->ctx, asn1_req, *buf, *buflen, buf, buflen); } else { r = sc_asn1_decode(card->ctx, asn1_cvc, *buf, *buflen, buf, buflen); } LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Encode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_encode_cvc(sc_pkcs15_card_t * p15card, sc_cvc_t *cvc, u8 ** buf, size_t *buflen) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; size_t lenchr; size_t lencar; int r; sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); asn1_cvc_pubkey[1].flags = SC_ASN1_OPTIONAL; asn1_cvcert[1].flags = SC_ASN1_OPTIONAL; sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 1); if (cvc->primeOrModulus && (cvc->primeOrModuluslen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 1, cvc->primeOrModulus, &cvc->primeOrModuluslen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 2, cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 1); if (cvc->coefficientB && (cvc->coefficientBlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 3, cvc->coefficientB, &cvc->coefficientBlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 4, cvc->basePointG, &cvc->basePointGlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 5, cvc->order, &cvc->orderlen, 1); if (cvc->publicPoint && (cvc->publicPointlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 6, cvc->publicPoint, &cvc->publicPointlen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 7, cvc->cofactor, &cvc->cofactorlen, 1); } if (cvc->modulusSize > 0) { sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 1); } sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 1); lencar = strnlen(cvc->car, sizeof cvc->car); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 1); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 1); lenchr = strnlen(cvc->chr, sizeof cvc->chr); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 1); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 1); if (cvc->signature && (cvc->signatureLen > 0)) { sc_format_asn1_entry(asn1_cvcert + 1, cvc->signature, &cvc->signatureLen, 1); } sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 1); r = sc_asn1_encode(card->ctx, asn1_cvc, buf, buflen); LOG_TEST_RET(card->ctx, r, "Could not encode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_get_curve(struct ec_curve **curve, u8 *oid, size_t oidlen) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].oid.len == oidlen) && !memcmp(curves[i].oid.value, oid, oidlen)) { *curve = &curves[i]; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } int sc_pkcs15emu_sc_hsm_get_curve_oid(sc_cvc_t *cvc, const struct sc_lv_data **oid) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].prime.len == cvc->primeOrModuluslen) && !memcmp(curves[i].prime.value, cvc->primeOrModulus, cvc->primeOrModuluslen)) { *oid = &curves[i].oid; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } static int sc_pkcs15emu_sc_hsm_get_rsa_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { pubkey->algorithm = SC_ALGORITHM_RSA; pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) return SC_ERROR_OUT_OF_MEMORY; pubkey->alg_id->algorithm = SC_ALGORITHM_RSA; pubkey->u.rsa.modulus.len = cvc->primeOrModuluslen; pubkey->u.rsa.modulus.data = malloc(pubkey->u.rsa.modulus.len); pubkey->u.rsa.exponent.len = cvc->coefficientAorExponentlen; 
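/* Editor's note (added comment): the modulus and exponent sizes are taken
 * directly from the decoded CVC without further validation here, so a
 * zero-length field turns the malloc() calls around this point into
 * malloc(0); callers are expected to pass only successfully decoded CVCs. */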
pubkey->u.rsa.exponent.data = malloc(pubkey->u.rsa.exponent.len); if (!pubkey->u.rsa.modulus.data || !pubkey->u.rsa.exponent.data) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.rsa.exponent.data, cvc->coefficientAorExponent, pubkey->u.rsa.exponent.len); memcpy(pubkey->u.rsa.modulus.data, cvc->primeOrModulus, pubkey->u.rsa.modulus.len); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_get_ec_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { struct sc_ec_parameters *ecp; const struct sc_lv_data *oid; int r; pubkey->algorithm = SC_ALGORITHM_EC; r = sc_pkcs15emu_sc_hsm_get_curve_oid(cvc, &oid); if (r != SC_SUCCESS) return r; ecp = calloc(1, sizeof(struct sc_ec_parameters)); if (!ecp) return SC_ERROR_OUT_OF_MEMORY; ecp->der.len = oid->len + 2; ecp->der.value = calloc(ecp->der.len, 1); if (!ecp->der.value) { free(ecp); return SC_ERROR_OUT_OF_MEMORY; } *(ecp->der.value + 0) = 0x06; *(ecp->der.value + 1) = (u8)oid->len; memcpy(ecp->der.value + 2, oid->value, oid->len); ecp->type = 1; // Named curve pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) { free(ecp->der.value); free(ecp); return SC_ERROR_OUT_OF_MEMORY; } pubkey->alg_id->algorithm = SC_ALGORITHM_EC; pubkey->alg_id->params = ecp; pubkey->u.ec.ecpointQ.value = malloc(cvc->publicPointlen); if (!pubkey->u.ec.ecpointQ.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.ecpointQ.value, cvc->publicPoint, cvc->publicPointlen); pubkey->u.ec.ecpointQ.len = cvc->publicPointlen; pubkey->u.ec.params.der.value = malloc(ecp->der.len); if (!pubkey->u.ec.params.der.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.params.der.value, ecp->der.value, ecp->der.len); pubkey->u.ec.params.der.len = ecp->der.len; /* FIXME: check return value? 
*/ sc_pkcs15_fix_ec_parameters(ctx, &pubkey->u.ec.params); return SC_SUCCESS; } int sc_pkcs15emu_sc_hsm_get_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { if (cvc->publicPoint && cvc->publicPointlen) { return sc_pkcs15emu_sc_hsm_get_ec_public_key(ctx, cvc, pubkey); } else { return sc_pkcs15emu_sc_hsm_get_rsa_public_key(ctx, cvc, pubkey); } } void sc_pkcs15emu_sc_hsm_free_cvc(sc_cvc_t *cvc) { if (cvc->signature) { free(cvc->signature); cvc->signature = NULL; } if (cvc->primeOrModulus) { free(cvc->primeOrModulus); cvc->primeOrModulus = NULL; } if (cvc->coefficientAorExponent) { free(cvc->coefficientAorExponent); cvc->coefficientAorExponent = NULL; } if (cvc->coefficientB) { free(cvc->coefficientB); cvc->coefficientB = NULL; } if (cvc->basePointG) { free(cvc->basePointG); cvc->basePointG = NULL; } if (cvc->order) { free(cvc->order); cvc->order = NULL; } if (cvc->publicPoint) { free(cvc->publicPoint); cvc->publicPoint = NULL; } if (cvc->cofactor) { free(cvc->cofactor); cvc->cofactor = NULL; } } static int sc_pkcs15emu_sc_hsm_add_pubkey(sc_pkcs15_card_t *p15card, u8 *efbin, size_t len, sc_pkcs15_prkey_info_t *key_info, char *label) { struct sc_context *ctx = p15card->card->ctx; sc_card_t *card = p15card->card; sc_pkcs15_pubkey_info_t pubkey_info; sc_pkcs15_object_t pubkey_obj; struct sc_pkcs15_pubkey pubkey; sc_cvc_t cvc; u8 *cvcpo; int r; cvcpo = efbin; memset(&cvc, 0, sizeof(cvc)); r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&cvcpo, &len, &cvc); LOG_TEST_RET(ctx, r, "Could not decode certificate signing request"); memset(&pubkey, 0, sizeof(pubkey)); r = sc_pkcs15emu_sc_hsm_get_public_key(ctx, &cvc, &pubkey); LOG_TEST_RET(card->ctx, r, "Could not extract public key"); memset(&pubkey_info, 0, sizeof(pubkey_info)); memset(&pubkey_obj, 0, sizeof(pubkey_obj)); r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_obj.content.value, &pubkey_obj.content.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_info.direct.raw.value, &pubkey_info.direct.raw.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); r = sc_pkcs15_encode_pubkey_as_spki(ctx, &pubkey, &pubkey_info.direct.spki.value, &pubkey_info.direct.spki.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); pubkey_info.id = key_info->id; strlcpy(pubkey_obj.label, label, sizeof(pubkey_obj.label)); if (pubkey.algorithm == SC_ALGORITHM_RSA) { pubkey_info.modulus_length = pubkey.u.rsa.modulus.len << 3; pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_ENCRYPT|SC_PKCS15_PRKEY_USAGE_VERIFY|SC_PKCS15_PRKEY_USAGE_WRAP; r = sc_pkcs15emu_add_rsa_pubkey(p15card, &pubkey_obj, &pubkey_info); } else { /* TODO: fix once curves whose size is not a multiple of 8 bits are supported */ pubkey_info.field_length = cvc.primeOrModuluslen << 3; pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_VERIFY; r = sc_pkcs15emu_add_ec_pubkey(p15card, &pubkey_obj, &pubkey_info); } LOG_TEST_RET(ctx, r, "Could not add public key"); sc_pkcs15emu_sc_hsm_free_cvc(&cvc); sc_pkcs15_erase_pubkey(&pubkey); return SC_SUCCESS; } /* * Add a key and the key description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_prkd(sc_pkcs15_card_t * p15card, u8 keyid) { sc_card_t *card = p15card->card; sc_pkcs15_cert_info_t cert_info; sc_pkcs15_object_t cert_obj; struct sc_pkcs15_object prkd; sc_pkcs15_prkey_info_t *key_info; u8 fid[2]; /* enough to hold a complete certificate */ u8 efbin[4096]; u8 *ptr; size_t len; int r; fid[0] = PRKD_PREFIX; fid[1] = keyid; /* Try to select a related EF containing
the PKCS#15 description of the key */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); ptr = efbin; memset(&prkd, 0, sizeof(prkd)); r = sc_pkcs15_decode_prkdf_entry(p15card, &prkd, (const u8 **)&ptr, &len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); /* All keys require user PIN authentication */ prkd.auth_id.len = 1; prkd.auth_id.value[0] = 1; /* * Set private key flag as all keys are private anyway */ prkd.flags |= SC_PKCS15_CO_FLAG_PRIVATE; key_info = (sc_pkcs15_prkey_info_t *)prkd.data; key_info->key_reference = keyid; key_info->path.aid.len = 0; if (prkd.type == SC_PKCS15_TYPE_PRKEY_RSA) { r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkd, key_info); } else { r = sc_pkcs15emu_add_ec_prkey(p15card, &prkd, key_info); } LOG_TEST_RET(card->ctx, r, "Could not add private key to framework"); /* Check if we also have a certificate for the private key */ fid[0] = EE_CERTIFICATE_PREFIX; len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 0); LOG_TEST_RET(card->ctx, r, "Could not read EF"); if (efbin[0] == 0x67) { /* Decode CSR and create public key object */ sc_pkcs15emu_sc_hsm_add_pubkey(p15card, efbin, len, key_info, prkd.label); free(key_info); return SC_SUCCESS; /* Ignore any errors */ } if (efbin[0] != 0x30) { free(key_info); return SC_SUCCESS; } memset(&cert_info, 0, sizeof(cert_info)); memset(&cert_obj, 0, sizeof(cert_obj)); cert_info.id = key_info->id; sc_path_set(&cert_info.path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); cert_info.path.count = -1; if (p15card->opts.use_file_cache) { /* look this up with our AID, which should already be cached from the * call to `read_file`. This may have the side effect that OpenSC's * caching layer re-selects our applet *if the cached file cannot be * found/used* and we may loose the authentication status. We assume * that caching works perfectly without this side effect. 
*/ cert_info.path.aid = sc_hsm_aid; } strlcpy(cert_obj.label, prkd.label, sizeof(cert_obj.label)); r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info); free(key_info); LOG_TEST_RET(card->ctx, r, "Could not add certificate"); return SC_SUCCESS; } /* * Add a data object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_dcod(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_data_info_t *data_info; sc_pkcs15_object_t data_obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = DCOD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the data */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.DCOD"); ptr = efbin; memset(&data_obj, 0, sizeof(data_obj)); r = sc_pkcs15_decode_dodf_entry(p15card, &data_obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Could not decode optional EF.DCOD"); data_info = (sc_pkcs15_data_info_t *)data_obj.data; r = sc_pkcs15emu_add_data_object(p15card, &data_obj, data_info); LOG_TEST_RET(card->ctx, r, "Could not add data object to framework"); return SC_SUCCESS; } /* * Add an unrelated certificate object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_cd(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_cert_info_t *cert_info; sc_pkcs15_object_t obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = CD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the certificate */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.CD"); ptr = efbin; memset(&obj, 0, sizeof(obj)); r = sc_pkcs15_decode_cdf_entry(p15card, &obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Could not decode optional EF.CD"); cert_info = (sc_pkcs15_cert_info_t *)obj.data; r = sc_pkcs15emu_add_x509_cert(p15card, &obj, cert_info); LOG_TEST_RET(card->ctx, r, "Could not add certificate to framework"); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_read_tokeninfo (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; int r; u8 efbin[512]; size_t len; LOG_FUNC_CALLED(card->ctx); /* Read token info */ len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x03", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); r = sc_pkcs15_parse_tokeninfo(card->ctx, p15card->tokeninfo, efbin, len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Initialize PKCS#15 emulation with user PIN, private keys, certificates and data objects * */ static int sc_pkcs15emu_sc_hsm_init (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; sc_hsm_private_data_t *priv = (sc_hsm_private_data_t *) card->drv_data; sc_file_t *file = NULL; sc_path_t path; u8 filelist[MAX_EXT_APDU_LENGTH]; int filelistlength; int r, i; sc_cvc_t devcert; struct sc_app_info *appinfo; struct sc_pkcs15_auth_info pin_info; struct sc_pkcs15_object pin_obj; struct sc_pin_cmd_data pindata; u8 efbin[1024]; u8 *ptr; size_t len; LOG_FUNC_CALLED(card->ctx); appinfo = calloc(1, sizeof(struct sc_app_info)); if (appinfo == NULL) { LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->aid = sc_hsm_aid; appinfo->ddo.aid = sc_hsm_aid; p15card->app = appinfo; sc_path_set(&path, SC_PATH_TYPE_DF_NAME, sc_hsm_aid.value, sc_hsm_aid.len, 0, 0); r = sc_select_file(card, &path, &file);
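/* From here on the SmartCard-HSM application is selected; the file control information returned in 'file' carries the firmware version in its last two proprietary bytes, which are parsed right below. */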
LOG_TEST_RET(card->ctx, r, "Could not select SmartCard-HSM application"); p15card->card->version.hw_major = 24; /* JCOP 2.4.1r3 */ p15card->card->version.hw_minor = 13; if (file && file->prop_attr && file->prop_attr_len >= 2) { p15card->card->version.fw_major = file->prop_attr[file->prop_attr_len - 2]; p15card->card->version.fw_minor = file->prop_attr[file->prop_attr_len - 1]; } sc_file_free(file); /* Read device certificate to determine serial number */ if (priv->EF_C_DevAut && priv->EF_C_DevAut_len) { ptr = priv->EF_C_DevAut; len = priv->EF_C_DevAut_len; } else { len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x02", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.C_DevAut"); /* save EF_C_DevAut for further use */ ptr = realloc(priv->EF_C_DevAut, len); if (ptr) { memcpy(ptr, efbin, len); priv->EF_C_DevAut = ptr; priv->EF_C_DevAut_len = len; } ptr = efbin; } memset(&devcert, 0 ,sizeof(devcert)); r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&ptr, &len, &devcert); LOG_TEST_RET(card->ctx, r, "Could not decode EF.C_DevAut"); sc_pkcs15emu_sc_hsm_read_tokeninfo(p15card); if (p15card->tokeninfo->label == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->label = strdup("GoID"); } else { p15card->tokeninfo->label = strdup("SmartCard-HSM"); } if (p15card->tokeninfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } if ((p15card->tokeninfo->manufacturer_id != NULL) && !strcmp("(unknown)", p15card->tokeninfo->manufacturer_id)) { free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = NULL; } if (p15card->tokeninfo->manufacturer_id == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->manufacturer_id = strdup("Bundesdruckerei GmbH"); } else { p15card->tokeninfo->manufacturer_id = strdup("www.CardContact.de"); } if (p15card->tokeninfo->manufacturer_id == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->label = strdup(p15card->tokeninfo->label); if (appinfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); len = strnlen(devcert.chr, sizeof devcert.chr); /* Strip last 5 digit sequence number from CHR */ assert(len >= 8); len -= 5; p15card->tokeninfo->serial_number = calloc(len + 1, 1); if (p15card->tokeninfo->serial_number == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(p15card->tokeninfo->serial_number, devcert.chr, len); *(p15card->tokeninfo->serial_number + len) = 0; sc_hsm_set_serialnr(card, p15card->tokeninfo->serial_number); sc_pkcs15emu_sc_hsm_free_cvc(&devcert); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 1; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x81; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_EXCHANGE_REF_DATA; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; pin_info.attrs.pin.min_length = 6; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 15; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 3; pin_info.max_tries = 3; pin_obj.auth_id.len = 1; pin_obj.auth_id.value[0] = 2; strlcpy(pin_obj.label, "UserPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE|SC_PKCS15_CO_FLAG_MODIFIABLE; r = sc_pkcs15emu_add_pin_obj(p15card, 
&pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 2; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x88; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_UNBLOCK_DISABLED|SC_PKCS15_PIN_FLAG_SO_PIN; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_BCD; pin_info.attrs.pin.min_length = 16; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 16; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 15; pin_info.max_tries = 15; strlcpy(pin_obj.label, "SOPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE; r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); if (card->type == SC_CARD_TYPE_SC_HSM_SOC || card->type == SC_CARD_TYPE_SC_HSM_GOID) { /* SC-HSM of this type always has a PIN-Pad */ r = SC_SUCCESS; } else { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x85; r = sc_pin_cmd(card, &pindata, NULL); } if (r == SC_ERROR_DATA_OBJECT_NOT_FOUND) { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x86; r = sc_pin_cmd(card, &pindata, NULL); } if ((r != SC_ERROR_DATA_OBJECT_NOT_FOUND) && (r != SC_ERROR_INCORRECT_PARAMETERS)) card->caps |= SC_CARD_CAP_PROTECTED_AUTHENTICATION_PATH; filelistlength = sc_list_files(card, filelist, sizeof(filelist)); LOG_TEST_RET(card->ctx, filelistlength, "Could not enumerate file and key identifier"); for (i = 0; i < filelistlength; i += 2) { switch(filelist[i]) { case KEY_PREFIX: r = sc_pkcs15emu_sc_hsm_add_prkd(p15card, filelist[i + 1]); break; case DCOD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_dcod(p15card, filelist[i + 1]); break; case CD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_cd(p15card, filelist[i + 1]); break; } if (r != SC_SUCCESS) { sc_log(card->ctx, "Error %d adding elements to framework", r); } } LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && (opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK)) { return sc_pkcs15emu_sc_hsm_init(p15card); } else { if (p15card->card->type != SC_CARD_TYPE_SC_HSM && p15card->card->type != SC_CARD_TYPE_SC_HSM_SOC && p15card->card->type != SC_CARD_TYPE_SC_HSM_GOID) { return SC_ERROR_WRONG_CARD; } return sc_pkcs15emu_sc_hsm_init(p15card); } }
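/*
 * Minimal sketch of how the entry point above might be driven directly.
 * This is illustrative only and not part of the driver: the PKCS#15
 * core normally invokes sc_pkcs15emu_sc_hsm_init_ex() itself through
 * its emulator table, and the helper name below is made up. It assumes
 * an already connected card and an allocated sc_pkcs15_card_t.
 */
static int example_bind_sc_hsm(sc_pkcs15_card_t *p15card)
{
	sc_pkcs15emu_opt_t opts;

	memset(&opts, 0, sizeof(opts));
	opts.flags = 0;	/* no SC_PKCS15EMU_FLAGS_NO_CHECK: keep the card-type check */

	/* Returns SC_ERROR_WRONG_CARD unless the card was detected as a
	 * SmartCard-HSM, SoC or GoID variant. */
	return sc_pkcs15emu_sc_hsm_init_ex(p15card, NULL, &opts);
}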
./CrossVul/dataset_final_sorted/CWE-119/c/bad_345_6
crossvul-cpp_data_good_2413_0
/* * inode.c * * PURPOSE * Inode handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/04/98 dgb Added rudimentary directory functions * 10/07/98 Fully working udf_block_map! It works! * 11/25/98 bmap altered to better support extents * 12/06/98 blf partition support in udf_iget, udf_block_map * and udf_read_inode * 12/12/98 rewrote udf_block_map to handle next extents and descs across * block boundaries (which is not actually allowed) * 12/20/98 added support for strategy 4096 * 03/07/99 rewrote udf_block_map (again) * New funcs, inode_bmap, udf_next_aext * 04/19/99 Support for writing device EA's for major/minor # */ #include "udfdecl.h" #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include <linux/mpage.h> #include <linux/aio.h> #include "udf_i.h" #include "udf_sb.h" MODULE_AUTHOR("Ben Fennema"); MODULE_DESCRIPTION("Universal Disk Format Filesystem"); MODULE_LICENSE("GPL"); #define EXTENT_MERGE_SIZE 5 static umode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static sector_t inode_getblk(struct inode *, sector_t, int *, int *); static int8_t udf_insert_aext(struct inode *, struct extent_position, struct kernel_lb_addr, uint32_t); static void udf_split_extents(struct inode *, int *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_prealloc_extents(struct inode *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_merge_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_update_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int, struct extent_position *); static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); static void __udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->cached_extent.lstart != -1) { brelse(iinfo->cached_extent.epos.bh); iinfo->cached_extent.lstart = -1; } } /* Invalidate extent cache */ static void udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); __udf_clear_extent_cache(inode); spin_unlock(&iinfo->i_extent_cache_lock); } /* Return contents of extent cache */ static int udf_read_extent_cache(struct inode *inode, loff_t bcount, loff_t *lbcount, struct extent_position *pos) { struct udf_inode_info *iinfo = UDF_I(inode); int ret = 0; spin_lock(&iinfo->i_extent_cache_lock); if ((iinfo->cached_extent.lstart <= bcount) && (iinfo->cached_extent.lstart != -1)) { /* Cache hit */ *lbcount = iinfo->cached_extent.lstart; memcpy(pos, &iinfo->cached_extent.epos, sizeof(struct extent_position)); if (pos->bh) get_bh(pos->bh); ret = 1; } spin_unlock(&iinfo->i_extent_cache_lock); return ret; } /* Add extent to extent cache */ static void udf_update_extent_cache(struct inode *inode, loff_t estart, struct extent_position *pos, int next_epos) { struct udf_inode_info *iinfo = 
UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); /* Invalidate previously cached extent */ __udf_clear_extent_cache(inode); if (pos->bh) get_bh(pos->bh); memcpy(&iinfo->cached_extent.epos, pos, sizeof(struct extent_position)); iinfo->cached_extent.lstart = estart; if (next_epos) switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: iinfo->cached_extent.epos.offset -= sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: iinfo->cached_extent.epos.offset -= sizeof(struct long_ad); } spin_unlock(&iinfo->i_extent_cache_lock); } void udf_evict_inode(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) { want_delete = 1; udf_setsize(inode, 0); udf_update_inode(inode, IS_SYNC(inode)); } truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && inode->i_size != iinfo->i_lenExtents) { udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", inode->i_ino, inode->i_mode, (unsigned long long)inode->i_size, (unsigned long long)iinfo->i_lenExtents); } kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; udf_clear_extent_cache(inode); if (want_delete) { udf_free_inode(inode); } } static void udf_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = inode->i_size; if (to > isize) { truncate_pagecache(inode, isize); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } } } static int udf_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, udf_get_block, wbc); } static int udf_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, udf_get_block); } static int udf_readpage(struct file *file, struct page *page) { return mpage_readpage(page, udf_get_block); } static int udf_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); if (unlikely(ret)) udf_write_failed(mapping, pos + len); return ret; } static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block); if (unlikely(ret < 0 && (rw & WRITE))) udf_write_failed(mapping, offset + count); return ret; } static sector_t udf_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, udf_get_block); } const struct address_space_operations udf_aops = { .readpage = udf_readpage, .readpages = udf_readpages, .writepage = udf_writepage, .writepages = udf_writepages, .write_begin = udf_write_begin, .write_end = generic_write_end, .direct_IO = udf_direct_IO, .bmap = udf_bmap, }; /* * Expand file stored in ICB to a normal 
one-block-file * * This function requires i_data_sem for writing and releases it. * This function requires i_mutex held */ int udf_expand_file_adinicb(struct inode *inode) { struct page *page; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; struct writeback_control udf_wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, }; WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); if (!iinfo->i_lenAlloc) { if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } /* * Release i_data_sem so that we can lock a page - page lock ranks * above i_data_sem. i_mutex still protects us against file changes. */ up_write(&iinfo->i_data_sem); page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_CACHE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); } down_write(&iinfo->i_data_sem); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... */ lock_page(page); kaddr = kmap(page); down_write(&iinfo->i_data_sem); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap(page); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } page_cache_release(page); mark_inode_dirty(inode); return err; } struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, int *err) { int newblock; struct buffer_head *dbh = NULL; struct kernel_lb_addr eloc; uint8_t alloctype; struct extent_position epos; struct udf_fileident_bh sfibh, dfibh; loff_t f_pos = udf_ext0_offset(inode); int size = udf_ext0_offset(inode) + inode->i_size; struct fileIdentDesc cfi, *sfi, *dfi; struct udf_inode_info *iinfo = UDF_I(inode); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) alloctype = ICBTAG_FLAG_AD_SHORT; else alloctype = ICBTAG_FLAG_AD_LONG; if (!inode->i_size) { iinfo->i_alloc_type = alloctype; mark_inode_dirty(inode); return NULL; } /* alloc block, and copy data to it */ *block = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, err); if (!(*block)) return NULL; newblock = udf_get_pblock(inode->i_sb, *block, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; dbh = udf_tgetblk(inode->i_sb, newblock); if (!dbh) return NULL; lock_buffer(dbh); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(dbh); unlock_buffer(dbh); mark_buffer_dirty_inode(dbh, inode); sfibh.soffset = sfibh.eoffset = f_pos & (inode->i_sb->s_blocksize - 1); sfibh.sbh = sfibh.ebh = NULL; dfibh.soffset = dfibh.eoffset = 0; dfibh.sbh = dfibh.ebh = dbh; while (f_pos < size) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; sfi = 
udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); if (!sfi) { brelse(dbh); return NULL; } iinfo->i_alloc_type = alloctype; sfi->descTag.tagLocation = cpu_to_le32(*block); dfibh.soffset = dfibh.eoffset; dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; brelse(dbh); return NULL; } } mark_buffer_dirty_inode(dbh, inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; iinfo->i_lenExtents = inode->i_size; epos.bh = NULL; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); /* UniqueID stuff */ brelse(epos.bh); mark_inode_dirty(inode); return dbh; } static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { int err, new; sector_t phys = 0; struct udf_inode_info *iinfo; if (!create) { phys = udf_block_map(inode, block); if (phys) map_bh(bh_result, inode->i_sb, phys); return 0; } err = -EIO; new = 0; iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); if (block == iinfo->i_next_alloc_block + 1) { iinfo->i_next_alloc_block++; iinfo->i_next_alloc_goal++; } udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); if (!phys) goto abort; if (new) set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, phys); abort: up_write(&iinfo->i_data_sem); return err; } static struct buffer_head *udf_getblk(struct inode *inode, long block, int create, int *err) { struct buffer_head *bh; struct buffer_head dummy; dummy.b_state = 0; dummy.b_blocknr = -1000; *err = udf_get_block(inode, block, &dummy, create); if (!*err && buffer_mapped(&dummy)) { bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (buffer_new(&dummy)) { lock_buffer(bh); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); } return bh; } return NULL; } /* Extend the file by 'blocks' blocks, return the number of extents added */ static int udf_do_extend_file(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, sector_t blocks) { sector_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; struct kernel_lb_addr prealloc_loc = {}; int prealloc_len = 0; struct udf_inode_info *iinfo; int err; /* The previous extent is fake and we should not extend by anything * - there's nothing to do... */ if (!blocks && fake) return 0; iinfo = UDF_I(inode); /* Round the last extent up to a multiple of block size */ if (last_ext->extLength & (sb->s_blocksize - 1)) { last_ext->extLength = (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); } /* Last extent are just preallocated blocks? 
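 * If so, they are already allocated on disk and must not be treated as part of the hole; save them here and re-attach them behind the newly created extents at the 'out' label.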
*/ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { /* Save the extent so that we can reattach it to the end */ prealloc_loc = last_ext->extLocation; prealloc_len = last_ext->extLength; /* Mark the extent as a hole */ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; } /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; if (add > blocks) add = blocks; blocks -= add; last_ext->extLength += add << sb->s_blocksize_bits; } if (fake) { udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); count++; } else udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); /* Managed to do everything necessary? */ if (!blocks) goto out; /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; add = (1 << (30-sb->s_blocksize_bits)) - 1; last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); /* Create enough extents to cover the whole hole */ while (blocks > add) { blocks -= add; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } if (blocks) { last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (blocks << sb->s_blocksize_bits); err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } out: /* Do we have some preallocated blocks saved? */ if (prealloc_len) { err = udf_add_aext(inode, last_pos, &prealloc_loc, prealloc_len, 1); if (err) return err; last_ext->extLocation = prealloc_loc; last_ext->extLength = prealloc_len; count++; } /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) last_pos->offset -= sizeof(struct long_ad); else return -EIO; return count; } static int udf_extend_file(struct inode *inode, loff_t newsize) { struct extent_position epos; struct kernel_lb_addr eloc; uint32_t elen; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); /* File has extent covering the new size (could happen when extending * inside a block)? */ if (etype != -1) return 0; if (newsize & (sb->s_blocksize - 1)) offset++; /* Extended file just to the boundary of the last file block? */ if (offset == 0) return 0; /* Truncate is extending the file by 'offset' blocks */ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { /* File has no extents at all or has empty last * indirect extent! Create a fake extent... 
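 * The fake extent is a zero-length NOT_RECORDED_NOT_ALLOCATED descriptor; udf_do_extend_file() recognizes it by its zero length and adds a fresh allocation descriptor instead of rewriting an existing one.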
*/ extent.extLocation.logicalBlockNum = 0; extent.extLocation.partitionReferenceNum = 0; extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; } else { epos.offset -= adsize; etype = udf_next_aext(inode, &epos, &extent.extLocation, &extent.extLength, 0); extent.extLength |= etype << 30; } err = udf_do_extend_file(inode, &epos, &extent, offset); if (err < 0) goto out; err = 0; iinfo->i_lenExtents = newsize; out: brelse(epos.bh); return err; } static sector_t inode_getblk(struct inode *inode, sector_t block, int *err, int *new) { struct kernel_long_ad laarr[EXTENT_MERGE_SIZE]; struct extent_position prev_epos, cur_epos, next_epos; int count = 0, startnum = 0, endnum = 0; uint32_t elen = 0, tmpelen; struct kernel_lb_addr eloc, tmpeloc; int c = 1; loff_t lbcount = 0, b_off = 0; uint32_t newblocknum, newblock; sector_t offset = 0; int8_t etype; struct udf_inode_info *iinfo = UDF_I(inode); int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; bool isBeyondEOF; *err = 0; *new = 0; prev_epos.offset = udf_file_entry_alloc_offset(inode); prev_epos.block = iinfo->i_location; prev_epos.bh = NULL; cur_epos = next_epos = prev_epos; b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; /* find the extent which contains the block we are looking for. alternate between laarr[0] and laarr[1] for locations of the current extent, and the previous extent */ do { if (prev_epos.bh != cur_epos.bh) { brelse(prev_epos.bh); get_bh(cur_epos.bh); prev_epos.bh = cur_epos.bh; } if (cur_epos.bh != next_epos.bh) { brelse(cur_epos.bh); get_bh(next_epos.bh); cur_epos.bh = next_epos.bh; } lbcount += elen; prev_epos.block = cur_epos.block; cur_epos.block = next_epos.block; prev_epos.offset = cur_epos.offset; cur_epos.offset = next_epos.offset; etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1); if (etype == -1) break; c = !c; laarr[c].extLength = (etype << 30) | elen; laarr[c].extLocation = eloc; if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) pgoal = eloc.logicalBlockNum + ((elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); count++; } while (lbcount + elen <= b_off); b_off -= lbcount; offset = b_off >> inode->i_sb->s_blocksize_bits; /* * Move prev_epos and cur_epos into indirect extent if we are at * the pointer to it */ udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0); udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0); /* if the extent is allocated and recorded, return the block if the extent is not a multiple of the blocksize, round up */ if (etype == (EXT_RECORDED_ALLOCATED >> 30)) { if (elen & (inode->i_sb->s_blocksize - 1)) { elen = EXT_RECORDED_ALLOCATED | ((elen + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); udf_write_aext(inode, &cur_epos, &eloc, elen, 1); } brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset); return newblock; } /* Are we beyond EOF? */ if (etype == -1) { int ret; isBeyondEOF = 1; if (count) { if (c) laarr[0] = laarr[1]; startnum = 1; } else { /* Create a fake extent when there's not one */ memset(&laarr[0].extLocation, 0x00, sizeof(struct kernel_lb_addr)); laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; /* Will udf_do_extend_file() create real extent from a fake one? 
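 * Only if offset > 0: then the blocks between the old EOF and the requested block become a real hole extent, which has to be counted in startnum.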
*/ startnum = (offset > 0); } /* Create extents for the hole between EOF and offset */ ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); if (ret < 0) { brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); *err = ret; return 0; } c = 0; offset = 0; count += ret; /* Not covered by a preallocated extent? */ if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) { /* Is there any real extent? - otherwise we overwrite * the fake one... */ if (count) c = !c; laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | inode->i_sb->s_blocksize; memset(&laarr[c].extLocation, 0x00, sizeof(struct kernel_lb_addr)); count++; } endnum = c + 1; lastblock = 1; } else { isBeyondEOF = 0; endnum = startnum = ((count > 2) ? 2 : count); /* if the current extent is in position 0, swap it with the previous */ if (!c && count != 1) { laarr[2] = laarr[0]; laarr[0] = laarr[1]; laarr[1] = laarr[2]; c = 1; } /* if the current block is located in an extent, read the next extent */ etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0); if (etype != -1) { laarr[c + 1].extLength = (etype << 30) | elen; laarr[c + 1].extLocation = eloc; count++; startnum++; endnum++; } else lastblock = 1; } /* if the current extent is not recorded but allocated, get the * block in the extent corresponding to the requested block */ if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) newblocknum = laarr[c].extLocation.logicalBlockNum + offset; else { /* otherwise, allocate a new block */ if (iinfo->i_next_alloc_block == block) goal = iinfo->i_next_alloc_goal; if (!goal) { if (!(goal = pgoal)) /* XXX: what was intended here? */ goal = iinfo->i_location.logicalBlockNum + 1; } newblocknum = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, goal, err); if (!newblocknum) { brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); *err = -ENOSPC; return 0; } if (isBeyondEOF) iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requested block is located in contains multiple * blocks, split the extent into at most three extents: blocks prior * to the requested block, the requested block itself, and blocks after * the requested block */ udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); #ifdef UDF_PREALLOCATE /* We preallocate blocks only for regular files. It also makes sense * for directories but there's the question of when to drop the * preallocation. We might use some delayed work for that but I feel * it's overengineering for a filesystem like UDF.
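 * The unused tail of such a preallocation is given back again via udf_discard_prealloc(), typically when the last writer releases the file.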
*/ if (S_ISREG(inode->i_mode)) udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); #endif /* merge any continuous blocks in laarr */ udf_merge_extents(inode, laarr, &endnum); /* write back the new extents, inserting new extents if the new number * of extents is greater than the old number, and deleting extents if * the new number of extents is less than the old number */ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); newblock = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) { *err = -EIO; return 0; } *new = 1; iinfo->i_next_alloc_block = block; iinfo->i_next_alloc_goal = newblocknum; inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return newblock; } static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { int curr = *c; int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits; int8_t etype = (laarr[curr].extLength >> 30); if (blen == 1) ; else if (!offset || blen == offset + 1) { laarr[curr + 2] = laarr[curr + 1]; laarr[curr + 1] = laarr[curr]; } else { laarr[curr + 3] = laarr[curr + 1]; laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; } if (offset) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &laarr[curr].extLocation, 0, offset); laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (offset << blocksize_bits); laarr[curr].extLocation.logicalBlockNum = 0; laarr[curr].extLocation. 
partitionReferenceNum = 0; } else laarr[curr].extLength = (etype << 30) | (offset << blocksize_bits); curr++; (*c)++; (*endnum)++; } laarr[curr].extLocation.logicalBlockNum = newblocknum; if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) laarr[curr].extLocation.partitionReferenceNum = UDF_I(inode)->i_location.partitionReferenceNum; laarr[curr].extLength = EXT_RECORDED_ALLOCATED | blocksize; curr++; if (blen != offset + 1) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) laarr[curr].extLocation.logicalBlockNum += offset + 1; laarr[curr].extLength = (etype << 30) | ((blen - (offset + 1)) << blocksize_bits); curr++; (*endnum)++; } } } static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int start, length = 0, currlength = 0, i; if (*endnum >= (c + 1)) { if (!lastblock) return; else start = c; } else { if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { start = c + 1; length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else start = c; } for (i = start + 1; i <= *endnum; i++) { if (i == *endnum) { if (lastblock) length += UDF_DEFAULT_PREALLOC_BLOCKS; } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else break; } if (length) { int next = laarr[start].extLocation.logicalBlockNum + (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); int numalloc = udf_prealloc_blocks(inode->i_sb, inode, laarr[start].extLocation.partitionReferenceNum, next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length : UDF_DEFAULT_PREALLOC_BLOCKS) - currlength); if (numalloc) { if (start == (c + 1)) laarr[start].extLength += (numalloc << inode->i_sb->s_blocksize_bits); else { memmove(&laarr[c + 2], &laarr[c + 1], sizeof(struct long_ad) * (*endnum - (c + 1))); (*endnum)++; laarr[c + 1].extLocation.logicalBlockNum = next; laarr[c + 1].extLocation.partitionReferenceNum = laarr[c].extLocation. 
partitionReferenceNum; laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED | (numalloc << inode->i_sb->s_blocksize_bits); start = c + 1; } for (i = start + 1; numalloc && i < *endnum; i++) { int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (elen > numalloc) { laarr[i].extLength -= (numalloc << inode->i_sb->s_blocksize_bits); numalloc = 0; } else { numalloc -= elen; if (*endnum > (i + 1)) memmove(&laarr[i], &laarr[i + 1], sizeof(struct long_ad) * (*endnum - (i + 1))); i--; (*endnum)--; } } UDF_I(inode)->i_lenExtents += numalloc << inode->i_sb->s_blocksize_bits; } } } static void udf_merge_extents(struct inode *inode, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int i; unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; for (i = 0; i < (*endnum - 1); i++) { struct kernel_long_ad *li /*l[i]*/ = &laarr[i]; struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1]; if (((li->extLength >> 30) == (lip1->extLength >> 30)) && (((li->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || ((lip1->extLocation.logicalBlockNum - li->extLocation.logicalBlockNum) == (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits)))) { if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; lip1->extLocation.logicalBlockNum = li->extLocation.logicalBlockNum + ((li->extLength & UDF_EXTENT_LENGTH_MASK) >> blocksize_bits); } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); i--; (*endnum)--; } } else if (((li->extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) && ((lip1->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0, ((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits); li->extLocation.logicalBlockNum = 0; li->extLocation.partitionReferenceNum = 0; if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); i--; (*endnum)--; } } else if ((li->extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0, ((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits); li->extLocation.logicalBlockNum = 0; li->extLocation.partitionReferenceNum = 0; li->extLength = (li->extLength & UDF_EXTENT_LENGTH_MASK) | EXT_NOT_RECORDED_NOT_ALLOCATED; } } } static void udf_update_extents(struct inode *inode, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum, 
struct extent_position *epos) { int start = 0, i; struct kernel_lb_addr tmploc; uint32_t tmplen; if (startnum > endnum) { for (i = 0; i < (startnum - endnum); i++) udf_delete_aext(inode, *epos, laarr[i].extLocation, laarr[i].extLength); } else if (startnum < endnum) { for (i = 0; i < (endnum - startnum); i++) { udf_insert_aext(inode, *epos, laarr[i].extLocation, laarr[i].extLength); udf_next_aext(inode, epos, &laarr[i].extLocation, &laarr[i].extLength, 1); start++; } } for (i = start; i < endnum; i++) { udf_next_aext(inode, epos, &tmploc, &tmplen, 0); udf_write_aext(inode, epos, &laarr[i].extLocation, laarr[i].extLength, 1); } } struct buffer_head *udf_bread(struct inode *inode, int block, int create, int *err) { struct buffer_head *bh = NULL; bh = udf_getblk(inode, block, create, err); if (!bh) return NULL; if (buffer_uptodate(bh)) return bh; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; brelse(bh); *err = -EIO; return NULL; } int udf_setsize(struct inode *inode, loff_t newsize) { int err; struct udf_inode_info *iinfo; int bsize = 1 << inode->i_blkbits; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return -EINVAL; if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; iinfo = UDF_I(inode); if (newsize > inode->i_size) { down_write(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { if (bsize < (udf_file_entry_alloc_offset(inode) + newsize)) { err = udf_expand_file_adinicb(inode); if (err) return err; down_write(&iinfo->i_data_sem); } else { iinfo->i_lenAlloc = newsize; goto set_size; } } err = udf_extend_file(inode, newsize); if (err) { up_write(&iinfo->i_data_sem); return err; } set_size: truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); } else { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize, 0x00, bsize - newsize - udf_file_entry_alloc_offset(inode)); iinfo->i_lenAlloc = newsize; truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); goto update_time; } err = block_truncate_page(inode->i_mapping, newsize, udf_get_block); if (err) return err; down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); truncate_setsize(inode, newsize); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } update_time: inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return 0; } /* * Maximum length of linked list formed by ICB hierarchy. The chosen number is * arbitrary - just that we hopefully don't limit any real use of rewritten * inode on write-once media but avoid looping for too long on corrupted media. */ #define UDF_MAX_ICB_NESTING 1024 static int udf_read_inode(struct inode *inode, bool hidden_inode) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint16_t ident; struct udf_inode_info *iinfo = UDF_I(inode); struct udf_sb_info *sbi = UDF_SB(inode->i_sb); struct kernel_lb_addr *iloc = &iinfo->i_location; unsigned int link_count; unsigned int indirections = 0; int ret = -EIO; reread: if (iloc->logicalBlockNum >= sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { udf_debug("block=%d, partition=%d out of range\n", iloc->logicalBlockNum, iloc->partitionReferenceNum); return -EIO; } /* * Set defaults, but the inode is still incomplete! 
* Note: get_new_inode() sets the following on a new inode: * i_sb = sb * i_no = ino * i_flags = sb->s_flags * i_state = 0 * clean_inode(): zero fills and sets * i_count = 1 * i_nlink = 1 * i_op = NULL; */ bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident); if (!bh) { udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); return -EIO; } if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && ident != TAG_IDENT_USE) { udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", inode->i_ino, ident); goto out; } fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4096)) { struct buffer_head *ibh; ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident); if (ident == TAG_IDENT_IE && ibh) { struct kernel_lb_addr loc; struct indirectEntry *ie; ie = (struct indirectEntry *)ibh->b_data; loc = lelb_to_cpu(ie->indirectICB.extLocation); if (ie->indirectICB.extLength) { brelse(ibh); memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr)); if (++indirections > UDF_MAX_ICB_NESTING) { udf_err(inode->i_sb, "too many ICBs in ICB hierarchy" " (max %d supported)\n", UDF_MAX_ICB_NESTING); goto out; } brelse(bh); goto reread; } } brelse(ibh); } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { udf_err(inode->i_sb, "unsupported strategy type: %d\n", le16_to_cpu(fe->icbTag.strategyType)); goto out; } if (fe->icbTag.strategyType == cpu_to_le16(4)) iinfo->i_strat4096 = 0; else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ iinfo->i_strat4096 = 1; iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; iinfo->i_lenAlloc = 0; iinfo->i_next_alloc_block = 0; iinfo->i_next_alloc_goal = 0; if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { iinfo->i_efe = 1; iinfo->i_use = 0; ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); if (ret) goto out; memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { iinfo->i_efe = 0; iinfo->i_use = 0; ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry)); if (ret) goto out; memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { iinfo->i_efe = 0; iinfo->i_use = 1; iinfo->i_lenAlloc = le32_to_cpu( ((struct unallocSpaceEntry *)bh->b_data)-> lengthAllocDescs); ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); if (ret) goto out; memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); return 0; } ret = -EIO; read_lock(&sbi->s_cred_lock); i_uid_write(inode, le32_to_cpu(fe->uid)); if (!uid_valid(inode->i_uid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) inode->i_uid = UDF_SB(inode->i_sb)->s_uid; i_gid_write(inode, le32_to_cpu(fe->gid)); if (!gid_valid(inode->i_gid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_dmode 
!= UDF_INVALID_MODE) inode->i_mode = sbi->s_dmode; else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; read_unlock(&sbi->s_cred_lock); link_count = le16_to_cpu(fe->fileLinkCount); if (!link_count) { if (!hidden_inode) { ret = -ESTALE; goto out; } link_count = 1; } set_nlink(inode, link_count); inode->i_size = le64_to_cpu(fe->informationLength); iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(fe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint); } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime)) iinfo->i_crtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(efe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); } inode->i_generation = iinfo->i_unique; /* Sanity checks for files in ICB so that we don't get confused later */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { /* * For file in ICB data is stored in allocation descriptor * so sizes should match */ if (iinfo->i_lenAlloc != inode->i_size) goto out; /* File in ICB has to fit in there... 
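 * The i_lenAlloc == i_size check above already matched the sizes; this one additionally rejects sizes that would overflow the room left in the block behind the file entry header and extended attributes.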
*/ if (inode->i_size > inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode)) goto out; } switch (fe->icbTag.fileType) { case ICBTAG_FILE_TYPE_DIRECTORY: inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; inode->i_mode |= S_IFDIR; inc_nlink(inode); break; case ICBTAG_FILE_TYPE_REALTIME: case ICBTAG_FILE_TYPE_REGULAR: case ICBTAG_FILE_TYPE_UNDEF: case ICBTAG_FILE_TYPE_VAT20: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; inode->i_mode |= S_IFREG; break; case ICBTAG_FILE_TYPE_BLOCK: inode->i_mode |= S_IFBLK; break; case ICBTAG_FILE_TYPE_CHAR: inode->i_mode |= S_IFCHR; break; case ICBTAG_FILE_TYPE_FIFO: init_special_inode(inode, inode->i_mode | S_IFIFO, 0); break; case ICBTAG_FILE_TYPE_SOCKET: init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: udf_debug("METADATA FILE-----\n"); break; case ICBTAG_FILE_TYPE_MIRROR: udf_debug("METADATA MIRROR FILE-----\n"); break; case ICBTAG_FILE_TYPE_BITMAP: udf_debug("METADATA BITMAP FILE-----\n"); break; default: udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n", inode->i_ino, fe->icbTag.fileType); goto out; } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (dsea) { init_special_inode(inode, inode->i_mode, MKDEV(le32_to_cpu(dsea->majorDeviceIdent), le32_to_cpu(dsea->minorDeviceIdent))); /* Developer ID ??? */ } else goto out; } ret = 0; out: brelse(bh); return ret; } static int udf_alloc_i_data(struct inode *inode, size_t size) { struct udf_inode_info *iinfo = UDF_I(inode); iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL); if (!iinfo->i_ext.i_data) { udf_err(inode->i_sb, "(ino %ld) no free memory\n", inode->i_ino); return -ENOMEM; } return 0; } static umode_t udf_convert_permissions(struct fileEntry *fe) { umode_t mode; uint32_t permissions; uint32_t flags; permissions = le32_to_cpu(fe->permissions); flags = le16_to_cpu(fe->icbTag.flags); mode = ((permissions) & S_IRWXO) | ((permissions >> 2) & S_IRWXG) | ((permissions >> 4) & S_IRWXU) | ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | ((flags & ICBTAG_FLAG_STICKY) ? 
S_ISVTX : 0); return mode; } int udf_write_inode(struct inode *inode, struct writeback_control *wbc) { return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } static int udf_sync_inode(struct inode *inode) { return udf_update_inode(inode, 1); } static int udf_update_inode(struct inode *inode, int do_sync) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint64_t lb_recorded; uint32_t udfperms; uint16_t icbflags; uint16_t crclen; int err = 0; struct udf_sb_info *sbi = UDF_SB(inode->i_sb); unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; struct udf_inode_info *iinfo = UDF_I(inode); bh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0)); if (!bh) { udf_debug("getblk failure\n"); return -ENOMEM; } lock_buffer(bh); memset(bh->b_data, 0, inode->i_sb->s_blocksize); fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (iinfo->i_use) { struct unallocSpaceEntry *use = (struct unallocSpaceEntry *)bh->b_data; use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); use->descTag.tagLocation = cpu_to_le32(iinfo->i_location.logicalBlockNum); crclen = sizeof(struct unallocSpaceEntry) + iinfo->i_lenAlloc - sizeof(struct tag); use->descTag.descCRCLength = cpu_to_le16(crclen); use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + sizeof(struct tag), crclen)); use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); goto out; } if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) fe->uid = cpu_to_le32(-1); else fe->uid = cpu_to_le32(i_uid_read(inode)); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET)) fe->gid = cpu_to_le32(-1); else fe->gid = cpu_to_le32(i_gid_read(inode)); udfperms = ((inode->i_mode & S_IRWXO)) | ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4); udfperms |= (le32_to_cpu(fe->permissions) & (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | FE_PERM_G_DELETE | FE_PERM_G_CHATTR | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); fe->permissions = cpu_to_le32(udfperms); if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); else fe->fileLinkCount = cpu_to_le16(inode->i_nlink); fe->informationLength = cpu_to_le64(inode->i_size); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct regid *eid; struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (!dsea) { dsea = (struct deviceSpec *) udf_add_extendedattr(inode, sizeof(struct deviceSpec) + sizeof(struct regid), 12, 0x3); dsea->attrType = cpu_to_le32(12); dsea->attrSubtype = 1; dsea->attrLength = cpu_to_le32( sizeof(struct deviceSpec) + sizeof(struct regid)); dsea->impUseLength = cpu_to_le32(sizeof(struct regid)); } eid = (struct regid *)dsea->impUse; memset(eid, 0, sizeof(struct regid)); strcpy(eid->ident, UDF_ID_DEVELOPER); eid->identSuffix[0] = UDF_OS_CLASS_UNIX; eid->identSuffix[1] = UDF_OS_ID_LINUX; dsea->majorDeviceIdent = cpu_to_le32(imajor(inode)); dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) lb_recorded = 0; /* No extents => no blocks! 
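 * For in-ICB files the data lives inside the file entry itself, so logicalBlocksRecorded is written as zero.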
*/ else lb_recorded = (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> (blocksize_bits - 9); if (iinfo->i_efe == 0) { memcpy(bh->b_data + sizeof(struct fileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct fileEntry)); fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime); memset(&(fe->impIdent), 0, sizeof(struct regid)); strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; fe->uniqueID = cpu_to_le64(iinfo->i_unique); fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint); fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); crclen = sizeof(struct fileEntry); } else { memcpy(bh->b_data + sizeof(struct extendedFileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); efe->objectSize = cpu_to_le64(inode->i_size); efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec)) iinfo->i_crtime = inode->i_atime; if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec)) iinfo->i_crtime = inode->i_mtime; if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) iinfo->i_crtime = inode->i_ctime; udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime); udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime); memset(&(efe->impIdent), 0, sizeof(struct regid)); strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; efe->uniqueID = cpu_to_le64(iinfo->i_unique); efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint); efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); crclen = sizeof(struct extendedFileEntry); } if (iinfo->i_strat4096) { fe->icbTag.strategyType = cpu_to_le16(4096); fe->icbTag.strategyParameter = cpu_to_le16(1); fe->icbTag.numEntries = cpu_to_le16(2); } else { fe->icbTag.strategyType = cpu_to_le16(4); fe->icbTag.numEntries = cpu_to_le16(1); } if (S_ISDIR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; else if (S_ISREG(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; else if (S_ISLNK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK; else if (S_ISBLK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK; else if (S_ISCHR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR; else if (S_ISFIFO(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO; else if (S_ISSOCK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; icbflags = iinfo->i_alloc_type | ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | ((inode->i_mode & S_ISVTX) ? 
ICBTAG_FLAG_STICKY : 0) | (le16_to_cpu(fe->icbTag.flags) & ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID | ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); fe->icbTag.flags = cpu_to_le16(icbflags); if (sbi->s_udfrev >= 0x0200) fe->descTag.descVersion = cpu_to_le16(3); else fe->descTag.descVersion = cpu_to_le16(2); fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); fe->descTag.tagLocation = cpu_to_le32( iinfo->i_location.logicalBlockNum); crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag); fe->descTag.descCRCLength = cpu_to_le16(crclen); fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag), crclen)); fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); out: set_buffer_uptodate(bh); unlock_buffer(bh); /* write the data blocks */ mark_buffer_dirty(bh); if (do_sync) { sync_dirty_buffer(bh); if (buffer_write_io_error(bh)) { udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n", inode->i_ino); err = -EIO; } } brelse(bh); return err; } struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino, bool hidden_inode) { unsigned long block = udf_get_lb_pblock(sb, ino, 0); struct inode *inode = iget_locked(sb, block); int err; if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); err = udf_read_inode(inode, hidden_inode); if (err < 0) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } int udf_add_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; struct short_ad *sad = NULL; struct long_ad *lad = NULL; struct allocExtDesc *aed; uint8_t *ptr; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return -EIO; if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) { unsigned char *sptr, *dptr; struct buffer_head *nbh; int err, loffset; struct kernel_lb_addr obloc = epos->block; epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, obloc.partitionReferenceNum, obloc.logicalBlockNum, &err); if (!epos->block.logicalBlockNum) return -ENOSPC; nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, &epos->block, 0)); if (!nbh) return -EIO; lock_buffer(nbh); memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(nbh); unlock_buffer(nbh); mark_buffer_dirty_inode(nbh, inode); aed = (struct allocExtDesc *)(nbh->b_data); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum); if (epos->offset + adsize > inode->i_sb->s_blocksize) { loffset = epos->offset; aed->lengthAllocDescs = cpu_to_le32(adsize); sptr = ptr - adsize; dptr = nbh->b_data + sizeof(struct allocExtDesc); memcpy(dptr, sptr, adsize); epos->offset = sizeof(struct allocExtDesc) + adsize; } else { loffset = epos->offset + adsize; aed->lengthAllocDescs = cpu_to_le32(0); sptr = ptr; epos->offset = sizeof(struct allocExtDesc); if (epos->bh) { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); } else { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } } if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200) udf_new_tag(nbh->b_data, 
TAG_IDENT_AED, 3, 1, epos->block.logicalBlockNum, sizeof(struct tag)); else udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, epos->block.logicalBlockNum, sizeof(struct tag)); switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)sptr; sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)sptr; lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); lad->extLocation = cpu_to_lelb(epos->block); memset(lad->impUse, 0x00, sizeof(lad->impUse)); break; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, loffset); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); brelse(epos->bh); } else { mark_inode_dirty(inode); } epos->bh = nbh; } udf_write_aext(inode, epos, eloc, elen, inc); if (!epos->bh) { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize)); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); } return 0; } void udf_write_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)ptr; sad->extLength = cpu_to_le32(elen); sad->extPosition = cpu_to_le32(eloc->logicalBlockNum); adsize = sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)ptr; lad->extLength = cpu_to_le32(elen); lad->extLocation = cpu_to_lelb(*eloc); memset(lad->impUse, 0x00, sizeof(lad->impUse)); adsize = sizeof(struct long_ad); break; default: return; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) { struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data; udf_update_tag(epos->bh->b_data, le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc)); } mark_buffer_dirty_inode(epos->bh, inode); } else { mark_inode_dirty(inode); } if (inc) epos->offset += adsize; } int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int8_t etype; while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { int block; epos->block = *eloc; epos->offset = sizeof(struct allocExtDesc); brelse(epos->bh); block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0); epos->bh = udf_tread(inode->i_sb, block); if (!epos->bh) { udf_debug("reading block %d failed!\n", block); return -1; } } return etype; } int8_t udf_current_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int alen; int8_t etype; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) { 
if (!epos->offset) epos->offset = udf_file_entry_alloc_offset(inode); ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; alen = udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc; } else { if (!epos->offset) epos->offset = sizeof(struct allocExtDesc); ptr = epos->bh->b_data + epos->offset; alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)-> lengthAllocDescs); } switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc); if (!sad) return -1; etype = le32_to_cpu(sad->extLength) >> 30; eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); eloc->partitionReferenceNum = iinfo->i_location.partitionReferenceNum; *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; break; case ICBTAG_FLAG_AD_LONG: lad = udf_get_filelongad(ptr, alen, &epos->offset, inc); if (!lad) return -1; etype = le32_to_cpu(lad->extLength) >> 30; *eloc = lelb_to_cpu(lad->extLocation); *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; break; default: udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type); return -1; } return etype; } static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr neloc, uint32_t nelen) { struct kernel_lb_addr oeloc; uint32_t oelen; int8_t etype; if (epos.bh) get_bh(epos.bh); while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) { udf_write_aext(inode, &epos, &neloc, nelen, 1); neloc = oeloc; nelen = (etype << 30) | oelen; } udf_add_aext(inode, &epos, &neloc, nelen, 1); brelse(epos.bh); return (nelen >> 30); } int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr eloc, uint32_t elen) { struct extent_position oepos; int adsize; int8_t etype; struct allocExtDesc *aed; struct udf_inode_info *iinfo; if (epos.bh) { get_bh(epos.bh); get_bh(epos.bh); } iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; oepos = epos; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1) return -1; while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1); if (oepos.bh != epos.bh) { oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = epos.offset - adsize; } } memset(&eloc, 0x00, sizeof(struct kernel_lb_addr)); elen = 0; if (epos.bh != oepos.bh) { udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= (adsize * 2); mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } else { udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) 
udf_update_tag(oepos.bh->b_data, epos.offset - adsize); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } brelse(epos.bh); brelse(oepos.bh); return (elen >> 30); } int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos, struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset) { unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits; int8_t etype; struct udf_inode_info *iinfo; iinfo = UDF_I(inode); if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) { pos->offset = 0; pos->block = iinfo->i_location; pos->bh = NULL; } *elen = 0; do { etype = udf_next_aext(inode, pos, eloc, elen, 1); if (etype == -1) { *offset = (bcount - lbcount) >> blocksize_bits; iinfo->i_lenExtents = lbcount; return -1; } lbcount += *elen; } while (lbcount <= bcount); /* update extent cache */ udf_update_extent_cache(inode, lbcount - *elen, pos, 1); *offset = (bcount + *elen - lbcount) >> blocksize_bits; return etype; } long udf_block_map(struct inode *inode, sector_t block) { struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; int ret; down_read(&UDF_I(inode)->i_data_sem); if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset); else ret = 0; up_read(&UDF_I(inode)->i_data_sem); brelse(epos.bh); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV)) return udf_fixed_to_variable(ret); else return ret; }
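/*
 * Editor's note: illustrative sketch, not part of the original inode.c.
 * udf_convert_permissions() above unpacks UDF's 5-bits-per-class on-disk
 * layout (other = bits 0-4, group = bits 5-9, owner = bits 10-14, where
 * the two high bits of each class are the extra chattr/delete rights)
 * into the 3-bit POSIX rwx classes by shifting each class down past the
 * extra bits. A minimal standalone version of the same mapping, under a
 * hypothetical name to avoid clashing with the kernel symbol:
 *
 *   unsigned int demo_udf_to_posix(unsigned int perm)
 *   {
 *       return (perm & 07)           // other: bits 0-2 already line up
 *            | ((perm >> 2) & 070)   // group: bits 5-7 -> mode bits 3-5
 *            | ((perm >> 4) & 0700); // owner: bits 10-12 -> mode bits 6-8
 *   }
 *
 * Worked example: on-disk permissions 0x1884 (owner read+write, group
 * read, other read) give demo_udf_to_posix(0x1884) == 0644, i.e. rw-r--r--.
 */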
./CrossVul/dataset_final_sorted/CWE-119/c/good_2413_0
crossvul-cpp_data_bad_5743_0
/* * IPv6 output functions * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/net/ipv4/ip_output.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * A.N.Kuznetsov : arithmetics in fragmentation. * extension headers are implemented. * route changes now work. * ip6_forward does not confuse sniffers. * etc. * * H. von Brand : Added missing #include <linux/string.h> * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/tcp.h> #include <linux/route.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/rawv6.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/checksum.h> #include <linux/mroute6.h> static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; struct in6_addr *nexthop; int ret; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. 
*/ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= IPV6_ADDR_SCOPE_NODELOCAL && !(dev->flags & IFF_LOOPBACK)) { kfree_skb(skb); return 0; } } rcu_read_lock_bh(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; } rcu_read_unlock_bh(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; } static int ip6_finish_output(struct sk_buff *skb) { if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb))) return ip6_fragment(skb, ip6_finish_output2); else return ip6_finish_output2(skb); } int ip6_output(struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, ip6_finish_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } /* * xmit an sk_buff (used by TCP, SCTP and DCCP) */ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, struct ipv6_txoptions *opt, int tclass) { struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr; u8 proto = fl6->flowi6_proto; int seg_len = skb->len; int hlimit = -1; u32 mtu; if (opt) { unsigned int head_room; /* First: exthdrs may take lots of space (~8K for now) MAX_HEADER is not enough. 
*/ head_room = opt->opt_nflen + opt->opt_flen; seg_len += head_room; head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); if (skb_headroom(skb) < head_room) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); if (skb2 == NULL) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -ENOBUFS; } consume_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); /* * Fill in the IPv6 header */ if (np) hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); ip6_flow_hdr(hdr, tclass, fl6->flowlabel); hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; hdr->hop_limit = hlimit; hdr->saddr = fl6->saddr; hdr->daddr = *first_hop; skb->protocol = htons(ETH_P_IPV6); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; mtu = dst_mtu(dst); if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, dst_output); } skb->dev = dst->dev; ipv6_local_error(sk, EMSGSIZE, fl6, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } EXPORT_SYMBOL(ip6_xmit); static int ip6_call_ra_chain(struct sk_buff *skb, int sel) { struct ip6_ra_chain *ra; struct sock *last = NULL; read_lock(&ip6_ra_lock); for (ra = ip6_ra_chain; ra; ra = ra->next) { struct sock *sk = ra->sk; if (sk && ra->sel == sel && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == skb->dev->ifindex)) { if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) rawv6_rcv(last, skb2); } last = sk; } } if (last) { rawv6_rcv(last, skb); read_unlock(&ip6_ra_lock); return 1; } read_unlock(&ip6_ra_lock); return 0; } static int ip6_forward_proxy_check(struct sk_buff *skb) { struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; __be16 frag_off; int offset; if (ipv6_ext_hdr(nexthdr)) { offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); if (offset < 0) return 0; } else offset = sizeof(struct ipv6hdr); if (nexthdr == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6; if (!pskb_may_pull(skb, (skb_network_header(skb) + offset + 1 - skb->data))) return 0; icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: /* For reaction involving unicast neighbor discovery * message destined to the proxied address, pass it to * input function. */ return 1; default: break; } } /* * The proxying router can't forward traffic sent to a link-local * address, so signal the sender and discard the packet. This * behavior is clarified by the MIPv6 specification. 
*/ if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { dst_link_failure(skb); return -1; } return 0; } static inline int ip6_forward_finish(struct sk_buff *skb) { return dst_output(skb); } int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); u32 mtu; if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } if (skb->pkt_type != PACKET_HOST) goto drop; skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without any WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be a mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { if (ip6_call_ra_chain(skb, ntohs(opt->ra))) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? */ if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct inet_peer *peer; struct rt6_info *rt; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) target = &rt->rt6i_gateway; else target = &hdr->daddr; peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(peer, 1*HZ)) ndisc_send_redirect(skb, target); if (peer) inet_putpeer(peer); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0); goto error; } } mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; } static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; to->priority = from->priority; to->protocol = from->protocol; skb_dst_drop(to); skb_dst_set(to, dst_clone(skb_dst(from))); to->dev = from->dev; to->mark = from->mark; #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif nf_copy(to, from); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) to->nf_trace = from->nf_trace; #endif skb_copy_secmark(to, from); } int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; int hroom, troom; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb is not generated by a local socket. */ if (unlikely(!skb->local_df && skb->len > mtu) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { if (skb->sk && dst_allfrag(skb_dst(skb))) sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (np && np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } mtu -= hlen + sizeof(struct frag_hdr); if (skb_has_frag_list(skb)) { int first_len = skb_pagelen(skb); struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path_clean; /* Partially cloned skb? 
*/ if (skb_shared(frag)) goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; } skb->truesize -= frag->truesize; } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(fh, rt); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); dst_hold(&rt->dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. */ if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if(!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); ip6_rt_put(rt); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); ip6_rt_put(rt); return err; slow_path_clean: skb_walk_frags(skb, frag2) { if (frag2 == frag) break; frag2->sk = NULL; frag2->destructor = NULL; skb->truesize += frag2->truesize; } } slow_path: if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb)) goto fail; left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; hroom = LL_RESERVED_SPACE(rt->dst.dev); troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending up to and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. 
*/ if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + hroom + troom, GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, hroom); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); frag->transport_header = (frag->network_header + hlen + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(fh, rt); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. */ err = output(frag); if (err) goto fail; IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGCREATES); } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); consume_skb(skb); return err; fail: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return err; } static inline int ip6_rt_check(const struct rt6key *rt_key, const struct in6_addr *fl_addr, const struct in6_addr *addr_cache) { return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); } static struct dst_entry *ip6_sk_dst_check(struct sock *sk, struct dst_entry *dst, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); struct rt6_info *rt; if (!dst) goto out; if (dst->ops->family != AF_INET6) { dst_release(dst); return NULL; } rt = (struct rt6_info *)dst; /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, * and MSG_DONTROUTE --ANK (980726) * * 1. ip6_rt_check(): If route was host route, * check that cached destination is current. * If it is network route, we still may * check its validity using saved pointer * to the last used address: daddr_cache. * We do not want to save whole address now, * (because main consumer of this service * is tcp, which has not this problem), * so that the last trick works only on connected * sockets. * 2. oif also should be the same. 
*/ if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) || #ifdef CONFIG_IPV6_SUBTREES ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || #endif (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { dst_release(dst); dst = NULL; } out: return dst; } static int ip6_dst_lookup_tail(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { struct net *net = sock_net(sk); #ifdef CONFIG_IPV6_OPTIMISTIC_DAD struct neighbour *n; struct rt6_info *rt; #endif int err; if (*dst == NULL) *dst = ip6_route_output(net, sk, fl6); if ((err = (*dst)->error)) goto out_err_release; if (ipv6_addr_any(&fl6->saddr)) { struct rt6_info *rt = (struct rt6_info *) *dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, &fl6->saddr); if (err) goto out_err_release; } #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up * has a neighbour entry that is in the INCOMPLETE * state and the src address from the flow is * marked as OPTIMISTIC, we release the found * dst entry and replace it instead with the * dst entry of the nexthop router */ rt = (struct rt6_info *) *dst; rcu_read_lock_bh(); n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; rcu_read_unlock_bh(); if (err) { struct inet6_ifaddr *ifp; struct flowi6 fl_gw6; int redirect; ifp = ipv6_get_ifaddr(net, &fl6->saddr, (*dst)->dev, 1); redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); if (ifp) in6_ifa_put(ifp); if (redirect) { /* * We need to get the dst entry for the * default router instead */ dst_release(*dst); memcpy(&fl_gw6, fl6, sizeof(struct flowi6)); memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(net, sk, &fl_gw6); if ((err = (*dst)->error)) goto out_err_release; } } #endif return 0; out_err_release: if (err == -ENETUNREACH) IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES); dst_release(*dst); *dst = NULL; return err; } /** * ip6_dst_lookup - perform route lookup on flow * @sk: socket which provides route info * @dst: pointer to dst_entry * for result * @fl6: flow to lookup * * This function performs a route lookup on the given flow. * * It returns zero on success, or a standard errno code on error. */ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { *dst = NULL; return ip6_dst_lookup_tail(sk, dst, fl6); } EXPORT_SYMBOL_GPL(ip6_dst_lookup); /** * ip6_dst_lookup_flow - perform route lookup on flow with ipsec * @sk: socket which provides route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow. * * It returns a valid dst pointer on success, or a pointer encoded * error code. 
*/ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = NULL; int err; err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); /** * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow * @sk: socket which provides the dst cache and route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow with the * possibility of using the cached route in the socket if it is valid. * It will take the socket dst lock when operating on the dst cache. * As a result, this function can only be used in process context. * * It returns a valid dst pointer on success, or a pointer encoded * error code. */ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); int err; dst = ip6_sk_dst_check(sk, dst, fl6); err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int mtu,unsigned int flags, struct rt6_info *rt) { struct sk_buff *skb; int err; /* There is support for UDP large send offload by network * device, so create one single skb packet containing complete * udp datagram */ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); if (skb == NULL) return err; /* reserve space for Hardware header */ skb_reserve(skb, hh_len); /* create space for UDP/IP header */ skb_put(skb,fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); /* initialize protocol header pointer */ skb->transport_header = skb->network_header + fragheaderlen; skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; } err = skb_append_datato_frags(sk,skb, getfrag, from, (length - transhdrlen)); if (!err) { struct frag_hdr fhdr; /* Specify the length of each IPv6 datagram fragment. * It has to be a multiple of 8. */ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - sizeof(struct frag_hdr)) & ~7; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ipv6_select_ident(&fhdr, rt); skb_shinfo(skb)->ip6_frag_id = fhdr.identification; __skb_queue_tail(&sk->sk_write_queue, skb); return 0; } /* There is not enough support to do UDP LSO, * so follow normal path */ kfree_skb(skb); return err; } static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src, gfp_t gfp) { return src ? 
kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } static void ip6_append_data_mtu(unsigned int *mtu, int *maxfraglen, unsigned int fragheaderlen, struct sk_buff *skb, struct rt6_info *rt, bool pmtuprobe) { if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { if (skb == NULL) { /* first fragment, reserve header_len */ *mtu = *mtu - rt->dst.header_len; } else { /* * this fragment is not first, the headers * space is regarded as data space. */ *mtu = min(*mtu, pmtuprobe ? rt->dst.dev->mtu : dst_mtu(rt->dst.path)); } *maxfraglen = ((*mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); } } int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen, mtu; int exthdrlen; int dst_exthdrlen; int hh_len; int copy; int err; int offset = 0; __u8 tx_flags = 0; if (flags&MSG_PROBE) return 0; cork = &inet->cork.base; if (skb_queue_empty(&sk->sk_write_queue)) { /* * setup for corking */ if (opt) { if (WARN_ON(np->cork.opt)) return -EINVAL; np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); if (unlikely(np->cork.opt == NULL)) return -ENOBUFS; np->cork.opt->tot_len = opt->tot_len; np->cork.opt->opt_flen = opt->opt_flen; np->cork.opt->opt_nflen = opt->opt_nflen; np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation); if (opt->dst0opt && !np->cork.opt->dst0opt) return -ENOBUFS; np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation); if (opt->dst1opt && !np->cork.opt->dst1opt) return -ENOBUFS; np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation); if (opt->hopopt && !np->cork.opt->hopopt) return -ENOBUFS; np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation); if (opt->srcrt && !np->cork.opt->srcrt) return -ENOBUFS; /* need source address above miyazawa*/ } dst_hold(&rt->dst); cork->dst = &rt->dst; inet->cork.fl.u.ip6 = *fl6; np->cork.hop_limit = hlimit; np->cork.tclass = tclass; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? rt->dst.dev->mtu : dst_mtu(&rt->dst); else mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? rt->dst.dev->mtu : dst_mtu(rt->dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } cork->fragsize = mtu; if (dst_allfrag(rt->dst.path)) cork->flags |= IPCORK_ALLFRAG; cork->length = 0; exthdrlen = (opt ? opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; } else { rt = (struct rt6_info *)cork->dst; fl6 = &inet->cork.fl.u.ip6; opt = np->cork.opt; transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; mtu = cork->fragsize; } hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); return -EMSGSIZE; } } /* For UDP, check if TX timestamp is enabled */ if (sk->sk_type == SOCK_DGRAM) sock_tx_timestamp(sk, &tx_flags); /* * Let's try using as much space as possible. 
* Use MTU if total length of the message fits into the MTU. * Otherwise, we need to reserve fragment header and * fragment alignment (= 8-15 octets, in total). * * Note that we may need to "move" the data from the tail * of the buffer to the new fragment when we split * the message. * * FIXME: It may be fragmented into multiple chunks * at once if non-fragmentable extension headers * are too large. * --yoshfuji */ cork->length += length; if (length > mtu) { int proto = sk->sk_protocol; if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); return -EMSGSIZE; } if (proto == IPPROTO_UDP && (rt->dst.dev->features & NETIF_F_UFO)) { err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, mtu, flags, rt); if (err) goto error; return 0; } } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) goto alloc_new_skb; while (length > 0) { /* Check if the remaining data fits into current packet. */ copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; if (copy < length) copy = maxfraglen - skb->len; if (copy <= 0) { char *data; unsigned int datalen; unsigned int fraglen; unsigned int fraggap; unsigned int alloclen; alloc_new_skb: /* There's no room in the current skb */ if (skb) fraggap = skb->len - maxfraglen; else fraggap = 0; /* update mtu and maxfraglen if necessary */ if (skb == NULL || skb_prev == NULL) ip6_append_data_mtu(&mtu, &maxfraglen, fragheaderlen, skb, rt, np->pmtudisc == IPV6_PMTUDISC_PROBE); skb_prev = skb; /* * If remaining data exceeds the mtu, * we know we need more fragment(s). */ datalen = length + fraggap; if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; if ((flags & MSG_MORE) && !(rt->dst.dev->features&NETIF_F_SG)) alloclen = mtu; else alloclen = datalen + fragheaderlen; alloclen += dst_exthdrlen; if (datalen != length + fraggap) { /* * this is not the last fragment, the trailer * space is regarded as data space. */ datalen += rt->dst.trailer_len; } alloclen += rt->dst.trailer_len; fraglen = datalen + fragheaderlen; /* * We just reserve space for fragment header. * Note: this may be overallocation if the message * (without MSG_MORE) fits into the MTU. */ alloclen += sizeof(struct frag_hdr); if (transhdrlen) { skb = sock_alloc_send_skb(sk, alloclen + hh_len, (flags & MSG_DONTWAIT), &err); } else { skb = NULL; if (atomic_read(&sk->sk_wmem_alloc) <= 2 * sk->sk_sndbuf) skb = sock_wmalloc(sk, alloclen + hh_len, 1, sk->sk_allocation); if (unlikely(skb == NULL)) err = -ENOBUFS; else { /* Only the initial fragment * is time stamped. 
*/ tx_flags = 0; } } if (skb == NULL) goto error; /* * Fill in the control structures */ skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; /* reserve for fragmentation and ipsec header */ skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + dst_exthdrlen); if (sk->sk_type == SOCK_DGRAM) skb_shinfo(skb)->tx_flags = tx_flags; /* * Find where to start putting bytes */ data = skb_put(skb, fraglen); skb_set_network_header(skb, exthdrlen); data += fragheaderlen; skb->transport_header = (skb->network_header + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits( skb_prev, maxfraglen, data + transhdrlen, fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; if (copy < 0) { err = -EINVAL; kfree_skb(skb); goto error; } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; } offset += copy; length -= datalen - fraggap; transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; /* * Put the packet on the pending queue */ __skb_queue_tail(&sk->sk_write_queue, skb); continue; } if (copy > length) copy = length; if (!(rt->dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; if (getfrag(from, skb_put(skb, copy), offset, copy, off, skb) < 0) { __skb_trim(skb, off); err = -EFAULT; goto error; } } else { int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); err = -ENOMEM; if (!sk_page_frag_refill(sk, pfrag)) goto error; if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { err = -EMSGSIZE; if (i == MAX_SKB_FRAGS) goto error; __skb_fill_page_desc(skb, i, pfrag->page, pfrag->offset, 0); skb_shinfo(skb)->nr_frags = ++i; get_page(pfrag->page); } copy = min_t(int, copy, pfrag->size - pfrag->offset); if (getfrag(from, page_address(pfrag->page) + pfrag->offset, offset, copy, skb->len, skb) < 0) goto error_efault; pfrag->offset += copy; skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); skb->len += copy; skb->data_len += copy; skb->truesize += copy; atomic_add(copy, &sk->sk_wmem_alloc); } offset += copy; length -= copy; } return 0; error_efault: err = -EFAULT; error: cork->length -= length; IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); return err; } EXPORT_SYMBOL_GPL(ip6_append_data); static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) { if (np->cork.opt) { kfree(np->cork.opt->dst0opt); kfree(np->cork.opt->dst1opt); kfree(np->cork.opt->hopopt); kfree(np->cork.opt->srcrt); kfree(np->cork.opt); np->cork.opt = NULL; } if (inet->cork.base.dst) { dst_release(inet->cork.base.dst); inet->cork.base.dst = NULL; inet->cork.base.flags &= ~IPCORK_ALLFRAG; } memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); } int ip6_push_pending_frames(struct sock *sk) { struct sk_buff *skb, *tmp_skb; struct sk_buff **tail_skb; struct in6_addr final_dst_buf, *final_dst = &final_dst_buf; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); struct ipv6hdr *hdr; struct ipv6_txoptions *opt = np->cork.opt; struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst; struct flowi6 *fl6 = &inet->cork.fl.u.ip6; unsigned char proto = fl6->flowi6_proto; int err = 0; if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL) goto out; tail_skb = &(skb_shinfo(skb)->frag_list); /* move skb->data to ip header from ext header */ if (skb->data < skb_network_header(skb)) __skb_pull(skb, 
skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb_network_header_len(skb)); *tail_skb = tmp_skb; tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; skb->data_len += tmp_skb->len; skb->truesize += tmp_skb->truesize; tmp_skb->destructor = NULL; tmp_skb->sk = NULL; } /* Allow local fragmentation. */ if (np->pmtudisc < IPV6_PMTUDISC_DO) skb->local_df = 1; *final_dst = fl6->daddr; __skb_pull(skb, skb_network_header_len(skb)); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt && opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel); hdr->hop_limit = np->cork.hop_limit; hdr->nexthdr = proto; hdr->saddr = fl6->saddr; hdr->daddr = *final_dst; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, dst_clone(&rt->dst)); IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); if (proto == IPPROTO_ICMPV6) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); } err = ip6_local_out(skb); if (err) { if (err > 0) err = net_xmit_errno(err); if (err) goto error; } out: ip6_cork_release(inet, np); return err; error: IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); goto out; } EXPORT_SYMBOL_GPL(ip6_push_pending_frames); void ip6_flush_pending_frames(struct sock *sk) { struct sk_buff *skb; while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { if (skb_dst(skb)) IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); } ip6_cork_release(inet_sk(sk), inet6_sk(sk)); } EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
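/*
 * Editor's note: illustrative arithmetic, not part of the original
 * ip6_output.c. In ip6_fragment() above, the usable payload per fragment
 * is mtu - (unfragmentable header length + sizeof(struct frag_hdr)), and
 * every fragment except the last is trimmed down to a multiple of 8
 * bytes (len &= ~7) because the fragment offset field counts 8-byte
 * units. A worked example, assuming a plain 40-byte IPv6 header with no
 * extension headers:
 *
 *   link MTU             = 1500
 *   payload per fragment = (1500 - 40 - 8) & ~7 = 1448
 *   a 3000-byte payload therefore splits into fragments carrying
 *   1448, 1448 and 104 bytes, at byte offsets 0, 1448 and 2896.
 */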
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5743_0
crossvul-cpp_data_good_292_0
/* Pango * pango-emoji.c: Emoji handling * * Copyright (C) 2017 Google, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Implementation of pango_emoji_iter is derived from Chromium: * * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/fonts/FontFallbackPriority.h * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/text/CharacterEmoji.cpp * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/fonts/SymbolsIterator.cpp * * // Copyright 2015 The Chromium Authors. All rights reserved. * // Use of this source code is governed by a BSD-style license that can be * // found in the LICENSE file. */ #include "config.h" #include <stdlib.h> #include <string.h> #include "pango-emoji-private.h" #include "pango-emoji-table.h" static int interval_compare (const void *key, const void *elt) { gunichar c = GPOINTER_TO_UINT (key); struct Interval *interval = (struct Interval *)elt; if (c < interval->start) return -1; if (c > interval->end) return +1; return 0; } #define DEFINE_pango_Is_(name) \ static gboolean \ _pango_Is_##name (gunichar ch) \ { \ /* bsearch() is declared attribute(nonnull(1)) so we can't validly search \ * for a NULL key */ \ /* \ if (G_UNLIKELY (ch == 0)) \ return FALSE; \ */ \ \ if (bsearch (GUINT_TO_POINTER (ch), \ _pango_##name##_table, \ G_N_ELEMENTS (_pango_##name##_table), \ sizeof _pango_##name##_table[0], \ interval_compare)) \ return TRUE; \ \ return FALSE; \ } DEFINE_pango_Is_(Emoji) DEFINE_pango_Is_(Emoji_Presentation) DEFINE_pango_Is_(Emoji_Modifier) DEFINE_pango_Is_(Emoji_Modifier_Base) static gboolean _pango_Is_Emoji_Text_Default (gunichar ch) { return _pango_Is_Emoji (ch) && !_pango_Is_Emoji_Presentation (ch); } static gboolean _pango_Is_Emoji_Emoji_Default (gunichar ch) { return _pango_Is_Emoji_Presentation (ch); } static gboolean _pango_Is_Emoji_Keycap_Base (gunichar ch) { return (ch >= '0' && ch <= '9') || ch == '#' || ch == '*'; } static gboolean _pango_Is_Regional_Indicator (gunichar ch) { return (ch >= 0x1F1E6 && ch <= 0x1F1FF); } const gunichar kCombiningEnclosingCircleBackslashCharacter = 0x20E0; const gunichar kCombiningEnclosingKeycapCharacter = 0x20E3; const gunichar kEyeCharacter = 0x1F441; const gunichar kFemaleSignCharacter = 0x2640; const gunichar kLeftSpeechBubbleCharacter = 0x1F5E8; const gunichar kMaleSignCharacter = 0x2642; const gunichar kRainbowCharacter = 0x1F308; const gunichar kStaffOfAesculapiusCharacter = 0x2695; const gunichar kVariationSelector15Character = 0xFE0E; const gunichar kVariationSelector16Character = 0xFE0F; const gunichar kWavingWhiteFlagCharacter = 0x1F3F3; const gunichar kZeroWidthJoinerCharacter = 0x200D; typedef enum { PANGO_EMOJI_TYPE_INVALID, PANGO_EMOJI_TYPE_TEXT, /* For regular non-symbols text */ PANGO_EMOJI_TYPE_EMOJI_TEXT, /* For emoji in text presentation */ PANGO_EMOJI_TYPE_EMOJI_EMOJI 
/* For emoji in emoji presentation */ } PangoEmojiType; static PangoEmojiType _pango_get_emoji_type (gunichar codepoint) { /* Those should only be Emoji presentation as combinations of two. */ if (_pango_Is_Emoji_Keycap_Base (codepoint) || _pango_Is_Regional_Indicator (codepoint)) return PANGO_EMOJI_TYPE_TEXT; if (codepoint == kCombiningEnclosingKeycapCharacter) return PANGO_EMOJI_TYPE_EMOJI_EMOJI; if (_pango_Is_Emoji_Emoji_Default (codepoint) || _pango_Is_Emoji_Modifier_Base (codepoint) || _pango_Is_Emoji_Modifier (codepoint)) return PANGO_EMOJI_TYPE_EMOJI_EMOJI; if (_pango_Is_Emoji_Text_Default (codepoint)) return PANGO_EMOJI_TYPE_EMOJI_TEXT; return PANGO_EMOJI_TYPE_TEXT; } PangoEmojiIter * _pango_emoji_iter_init (PangoEmojiIter *iter, const char *text, int length) { iter->text_start = text; if (length >= 0) iter->text_end = text + length; else iter->text_end = text + strlen (text); iter->start = text; iter->end = text; iter->is_emoji = (gboolean) 2; /* HACK */ _pango_emoji_iter_next (iter); return iter; } void _pango_emoji_iter_fini (PangoEmojiIter *iter) { } #define PANGO_EMOJI_TYPE_IS_EMOJI(typ) ((typ) == PANGO_EMOJI_TYPE_EMOJI_EMOJI) gboolean _pango_emoji_iter_next (PangoEmojiIter *iter) { PangoEmojiType current_emoji_type = PANGO_EMOJI_TYPE_INVALID; if (iter->end == iter->text_end) return FALSE; iter->start = iter->end; for (; iter->end < iter->text_end; iter->end = g_utf8_next_char (iter->end)) { gunichar ch = g_utf8_get_char (iter->end); /* Except at the beginning, ZWJ just carries over the emoji or neutral * text type, VS15 & VS16 we just carry over as well, since we already * resolved those through lookahead. Also, don't downgrade to text * presentation for emoji that are part of a ZWJ sequence, example * U+1F441 U+200D U+1F5E8, eye (text presentation) + ZWJ + left speech * bubble, see below. */ if ((!(ch == kZeroWidthJoinerCharacter && !iter->is_emoji) && ch != kVariationSelector15Character && ch != kVariationSelector16Character && ch != kCombiningEnclosingCircleBackslashCharacter && !_pango_Is_Regional_Indicator(ch) && !((ch == kLeftSpeechBubbleCharacter || ch == kRainbowCharacter || ch == kMaleSignCharacter || ch == kFemaleSignCharacter || ch == kStaffOfAesculapiusCharacter) && !iter->is_emoji)) || current_emoji_type == PANGO_EMOJI_TYPE_INVALID) { current_emoji_type = _pango_get_emoji_type (ch); } if (g_utf8_next_char (iter->end) < iter->text_end) /* Optimize. */ { gunichar peek_char = g_utf8_get_char (g_utf8_next_char (iter->end)); /* Variation Selectors */ if (current_emoji_type == PANGO_EMOJI_TYPE_EMOJI_EMOJI && peek_char == kVariationSelector15Character) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_TEXT; } if ((current_emoji_type == PANGO_EMOJI_TYPE_EMOJI_TEXT || _pango_Is_Emoji_Keycap_Base(ch)) && peek_char == kVariationSelector16Character) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } /* Combining characters Keycap... */ if (_pango_Is_Emoji_Keycap_Base(ch) && peek_char == kCombiningEnclosingKeycapCharacter) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; }; /* Regional indicators */ if (_pango_Is_Regional_Indicator(ch) && _pango_Is_Regional_Indicator(peek_char)) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } /* Upgrade text presentation emoji to emoji presentation when followed by * ZWJ, Example U+1F441 U+200D U+1F5E8, eye + ZWJ + left speech bubble. 
*/ if ((ch == kEyeCharacter || ch == kWavingWhiteFlagCharacter) && peek_char == kZeroWidthJoinerCharacter) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } } if (iter->is_emoji == (gboolean) 2) iter->is_emoji = !PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); if (iter->is_emoji == PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type)) { iter->is_emoji = !PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); /* Make sure we make progress. Weird sequences, like a VS15 followed * by VS16, can trick us into stalling otherwise. */ if (iter->start == iter->end) iter->end = g_utf8_next_char (iter->end); return TRUE; } } iter->is_emoji = PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); return TRUE; } /********************************************************** * End of code from Chromium **********************************************************/
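/* A minimal usage sketch for the iterator above (illustrative only, not part of the original file; process_run() is a hypothetical consumer). For non-empty text, _pango_emoji_iter_init() leaves the first run in [iter.start, iter.end) with its presentation flag in iter.is_emoji, and _pango_emoji_iter_next() advances run by run until it returns FALSE: * * PangoEmojiIter iter; * _pango_emoji_iter_init (&iter, text, -1); * do * process_run (iter.start, iter.end, iter.is_emoji); * while (_pango_emoji_iter_next (&iter)); * _pango_emoji_iter_fini (&iter); */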
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all paths through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less than 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible.
* * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns either a pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifier state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; }; static DEFINE_MUTEX(bpf_verifier_lock); /* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user * can figure out what's wrong with the program */ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, const char *fmt, ...) { struct bpf_verifer_log *log = &env->log; unsigned int n; va_list args; if (!log->level || !log->ubuf || bpf_verifier_log_full(log)) return; va_start(args, fmt); n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); va_end(args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *state) { struct bpf_reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d=%s", i, reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, " fp%d=%s", -MAX_BPF_STACK + i * BPF_REG_SIZE, reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { /* internal bug, make state invalid to reject the program */ memset(dst, 0, sizeof(*dst)); return -EFAULT; } memcpy(dst->stack, src->stack, sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); return 0; } /* 
do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_verifier_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which this function copies over. It points to previous bpf_verifier_state * which is never reallocated */ static int realloc_verifier_state(struct bpf_verifier_state *state, int size, bool copy_old) { u32 old_size = state->allocated_stack; struct bpf_stack_state *new_stack; int slot = size / BPF_REG_SIZE; if (size <= old_size || !size) { if (copy_old) return 0; state->allocated_stack = slot * BPF_REG_SIZE; if (!size && old_size) { kfree(state->stack); state->stack = NULL; } return 0; } new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), GFP_KERNEL); if (!new_stack) return -ENOMEM; if (copy_old) { if (state->stack) memcpy(new_stack, state->stack, sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); memset(new_stack + old_size / BPF_REG_SIZE, 0, sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); } state->allocated_stack = slot * BPF_REG_SIZE; kfree(state->stack); state->stack = new_stack; return 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { kfree(state->stack); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_verifier_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { int err; err = realloc_verifier_state(dst, src->allocated_stack, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); return copy_stack_state(dst, src); } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
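* * Illustrative example (not from the original source): when check_alu_op() processes BPF_MOV64_IMM(BPF_REG_2, 42) it ends up here with imm == 42, so smin/smax/umin/umax all collapse to 42 and var_off becomes tnum_const(42), i.e. every bit of the value is known.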
*/ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
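* * Illustrative numbers (not from the original source): if umin_value is 0xfffffffffffffff0 (-16 as s64), every value in [umin, umax] has the sign bit set, so the whole range is negative and the unsigned bounds below can be reused directly as signed bounds.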
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) { struct bpf_verifier_state *parent = state->parent; if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->regs[regno].live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } /* save register state */ state->stack[spi].spilled_ptr = state->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC; } return 0; } static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) { struct bpf_verifier_state *parent = state->parent; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } } if (value_regno >= 0) /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. 
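* * Worked example with hypothetical numbers: for a 4-byte access at fixed off 8 into a map with value_size 48, variable bounds [0, 36] pass both probes (8 + 0 + 4 and 8 + 36 + 4 are <= 48), while bounds [0, 37] fail the umax probe because 8 + 37 + 4 > 48.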
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
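* * Illustrative example (assuming the program type permits narrow loads of the field): a 1-byte load from the 4-byte __sk_buff "mark" field would record ctx_field_size == 4 here, letting a later rewrite pass load the whole word and mask out the requested byte.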
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { bool strict = env->strict_alignment; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. 
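* * Illustrative example (not from the original source): spilling an 8-byte register to fp-12 is rejected by check_generic_ptr_alignment() because -12 is not 8-byte aligned, whereas a spill to fp-16 passes.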
*/ strict = true; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } /* ctx accesses must be at a fixed offset, so that we can * determine what type of data were returned. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", regno, reg->off, off - reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. * See check_stack_read(). 
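* * Illustrative example (not from the original source): a store like *(u64 *)(r10 - 8) = r1 is fine, but an access through r10 + r2 with a non-constant r2 is rejected below, since spill slots could no longer be tracked precisely.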
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state reg) { return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
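* * Illustrative example (not from the original source): a helper called with (buf, len) == (fp - 16, 16) is accepted only if [fp - 16, fp) lies inside the stack and, outside of raw mode, each of those 16 bytes was previously written by the program (STACK_MISC).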
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; int off, i, slot, spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(regs[regno])) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[regs[regno].type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(regs[regno].var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot || state->stack[spi].slot_type[slot % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; } } return 0; } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_MEM || arg_type == ARG_PTR_TO_MEM_OR_NULL || arg_type == 
ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(*reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. Check it * note: regno == len, regno - 1 == buf */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ verbose(env, "ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. */ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... 
*/ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap, open when use-cases appear */ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP) goto error; break; case BPF_FUNC_sk_redirect_map: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
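* * Illustrative example (not from the original source): after a call to bpf_skb_store_bytes(), for which bpf_helper_changes_pkt_data() returns true, a packet pointer saved in r6 before the call must be re-derived from the context before it may be dereferenced again.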
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL only function from proprietary program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; /* We only support one arg being in raw mode at the moment, which * is sufficient for the helper functions we have right now. */ err = check_raw_mode(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. 
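* * Illustrative example (not from the original source): for an argument declared ARG_PTR_TO_UNINIT_MEM, such as the destination buffer of bpf_probe_read(), the bytes need not be initialized before the call; instead they are marked as written by the BPF_WRITE probes below.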
*/ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); if (err) return err; } regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; insn_aux = &env->insn_aux_data[insn_idx]; if (!insn_aux->map_ptr) insn_aux->map_ptr = meta.map_ptr; else if (insn_aux->map_ptr != meta.map_ptr) insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
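* * Illustrative example (not from the original source): with r2 holding a PTR_TO_MAP_VALUE at off 0 and r3 a scalar known to lie in [0, 16], r2 += r3 leaves r2 a PTR_TO_MAP_VALUE whose variable offset spans [0, 16]; later loads through r2 are then range-checked against value_size by check_map_access().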
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (!env->allow_ptr_leaks) verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
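* * Illustrative example (not from the original source): pkt_ptr += r4 with r4 in [0, 64] keeps the fixed off, widens the variable offset by [0, 64], assigns a fresh id, and resets range to 0, so the program must compare against pkt_end again before dereferencing.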
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; } break; case BPF_SUB: if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ if (!env->allow_ptr_leaks) verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic"), JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { if (!env->allow_ptr_leaks) verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was subtracted from pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit for now. * (However, in principle we could allow some cases, e.g. * ptr &= ~3 which would reduce min_value by 3.) */ if (!env->allow_ptr_leaks) verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case.
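* * Illustrative example (not from the original source): for a 32-bit BPF_ALU shift, insn_bitness is 32, so a shift count that may be >= 32 marks the destination unknown, and after any 32-bit op the result is truncated via coerce_reg_to_size(dst_reg, 4) at the end of this function.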
*/ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. 
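* E.g. with dst_reg known to lie in [0, 0xff] and a constant src of 0x0f, * umax_value becomes min(0xff, 0x0f) == 0x0f below, and the same value is * a valid smax_value because the sign bit cannot be set.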
*/ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } if (src_known) dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounds capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off.
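* E.g. "r1 += r2" on two scalars ends up in adjust_scalar_min_max_vals(), * while the same instruction with r1 holding a map-value pointer is * routed through adjust_ptr_min_max_vals() instead.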
*/ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. */ if (!env->allow_ptr_leaks) { verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); return 0; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ rc = adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* scalar += unknown scalar */ __mark_reg_unknown(&off_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } else if (ptr_reg) { /* pointer += scalar */ rc = adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += scalar */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, *src_reg); } return rc; } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. */ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += K */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, 
insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. 
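* E.g. any dst_reg->umax_value above MAX_PACKET_OFF simply results in no * range being recorded below, which is always the conservative choice.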
*/ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that). * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure.
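* E.g. after "if r1 == 42 goto l", the branch at l may treat r1 as the * constant 42, while the fall-through path learns nothing new.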
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
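* Note that val here comes from the dst side of the compare, so the * cases below mirror those in reg_set_min_max(): e.g. a true BPF_JGT now * caps the variable reg with umax_value <= val - 1 instead of raising * its umin_value.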
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 
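/* The taken branch has pkt_data' >= pkt_end, so it is the fall-through * (this_branch) that is known safe here. */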
find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch->regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch->regs[insn->src_reg], &other_branch->regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending on the R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for
this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if 
(insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. 
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* if we knew anything about the old value, we're not * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ return rold->umin_value == 0 && rold->umax_value == U64_MAX && rold->smin_value == S64_MIN && rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
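* (E.g. rold->off == 4 versus rcur->off == 2: an access proven safe at * offset 4 says nothing about alignment or range at offset 2.)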
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, struct idpair *idmap) { int i, spi; /* if explored stack has more populated slots than current stack * such stacks are not equivalent */ if (old->allocated_stack > cur->allocated_stack) return false; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * these verifier states are not equivalent; * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slots are both storing * spilled registers, check that the stored pointer types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when the verifier explores different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers.
If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at a * jump target (in the first iteration of the propagate_liveness() loop), * we didn't arrive by the straight-line code, so read marks in state must * propagate to parent regardless of state's write marks. */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; if (!parent) return touched; /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) continue; if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } } /* ... and stack slots */ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; if (state->stack[i].slot_type[0] != STACK_SPILL) continue; if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (writes && (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } return touched; } /* "parent" is "a state from which we reach the current state", but initially * it is not the state->parent (i.e. "the state whose straight-line code leads * to the current state"), instead it is the state that happened to arrive at * a (prunable) equivalent of the current state. See comment above * do_propagate_liveness() for consequences of this. * This function is just a more efficient way of calling mark_reg_read() or * mark_stack_slot_read() on each reg in "parent" that is read in "state", * though it requires that parent != state->parent in the call arguments. 
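* The loop below climbs one level up the parentage chain each time a pass * makes a change, so new read marks keep flowing toward the oldest * ancestor that still needs them.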
*/ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { while (do_propagate_liveness(state, parent)) { /* Something changed, so we need to feed those changes onward */ state = parent; parent = state->parent; } } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach bpf_exit (which means it's safe) or * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (i = 0; i < BPF_REG_FP; i++) cur->regs[i].live = REG_LIVE_NONE; for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) if (cur->stack[i].slot_type[0] == STACK_SPILL) cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. 
Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* the check for reserved fields was already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* An abuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it.
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. */ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) 
map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; fdput(f); next_insn: insn++; i++; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; return new_prog; } /* The verifier does more data flow analysis than llvm and will not explore * branches that are dead at run time. Malicious programs can have dead code * too. Therefore replace all dead at-run-time code with nops. 
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &nop, sizeof(nop)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(ctx_field_size - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpeter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * handlers are currently limited to 64 bit only. */ if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) goto patch_call_imm; cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->imm == BPF_FUNC_redirect_map) { /* Note, we cannot use prog directly as imm as subsequent * rewrites would still change the prog pointer. The only * stable address we can use is aux, which also works with * prog clones during blinding. 
*/ u64 addr = (unsigned long)prog->aux; struct bpf_insn r4_ld[] = { BPF_LD_IMM64(BPF_REG_4, addr), *insn, }; cnt = ARRAY_SIZE(r4_ld); new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifer_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * (*prog)->len); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (env->prog->aux->offload) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; } ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if 
(!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
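/*
 * Illustrative sketch (not part of the kernel source): the loader-side view
 * of the map-fd fixup done by replace_map_fd_with_map_ptr() above.  The
 * verifier expects a map reference to arrive as a two-instruction
 * BPF_LD_IMM64 whose first half carries BPF_PSEUDO_MAP_FD in src_reg and the
 * map fd in imm, and whose second half is all-zero apart from the high 32
 * bits of the immediate (zero here, since an fd fits in 32 bits).  The
 * helper name below is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>

static void emit_ld_map_fd(struct bpf_insn *insn, int dst_reg, int map_fd)
{
	memset(insn, 0, 2 * sizeof(*insn));
	insn[0].code    = BPF_LD | BPF_DW | BPF_IMM;
	insn[0].dst_reg = dst_reg;
	insn[0].src_reg = BPF_PSEUDO_MAP_FD; /* imm is a map fd, not a constant */
	insn[0].imm     = map_fd;            /* low 32 bits of the 64-bit imm */
	/* insn[1] stays zeroed: a nonzero code/dst_reg/src_reg/off there makes
	 * the verifier bail with "invalid bpf_ld_imm64 insn". */
}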
/* ==== end of CrossVul record: ./CrossVul/dataset_final_sorted/CWE-119/c/good_2987_0 (kernel/bpf/verifier.c) ==== */

/* ==== CrossVul record crossvul-cpp_data_bad_5733_5: FFmpeg libavfilter/vf_hflip.c ==== */
/* * Copyright (c) 2007 Benoit Fouet * Copyright (c) 2010 Stefano Sabatini * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * horizontal flip filter */ #include <string.h> #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" #include "libavutil/pixdesc.h" #include "libavutil/internal.h" #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" typedef struct { int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes int hsub, vsub; ///< chroma subsampling } FlipContext; static int query_formats(AVFilterContext *ctx) { AVFilterFormats *pix_fmts = NULL; int fmt; for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL || desc->flags & AV_PIX_FMT_FLAG_BITSTREAM || (desc->log2_chroma_w != desc->log2_chroma_h && desc->comp[0].plane == desc->comp[1].plane))) ff_add_format(&pix_fmts, fmt); } ff_set_common_formats(ctx, pix_fmts); return 0; } static int config_props(AVFilterLink *inlink) { FlipContext *s = inlink->dst->priv; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc); s->hsub = pix_desc->log2_chroma_w; s->vsub = pix_desc->log2_chroma_h; return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; FlipContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFrame *out; uint8_t *inrow, *outrow; int i, j, plane, step; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); /* copy palette if required */ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL) memcpy(out->data[1], in->data[1], AVPALETTE_SIZE); for (plane = 0; plane < 4 && in->data[plane]; plane++) { const int width = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w; const int height = (plane == 1 || plane == 2) ? 
FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h; step = s->max_step[plane]; outrow = out->data[plane]; inrow = in ->data[plane] + (width - 1) * step; for (i = 0; i < height; i++) { switch (step) { case 1: for (j = 0; j < width; j++) outrow[j] = inrow[-j]; break; case 2: { uint16_t *outrow16 = (uint16_t *)outrow; uint16_t * inrow16 = (uint16_t *) inrow; for (j = 0; j < width; j++) outrow16[j] = inrow16[-j]; } break; case 3: { uint8_t *in = inrow; uint8_t *out = outrow; for (j = 0; j < width; j++, out += 3, in -= 3) { int32_t v = AV_RB24(in); AV_WB24(out, v); } } break; case 4: { uint32_t *outrow32 = (uint32_t *)outrow; uint32_t * inrow32 = (uint32_t *) inrow; for (j = 0; j < width; j++) outrow32[j] = inrow32[-j]; } break; default: for (j = 0; j < width; j++) memcpy(outrow + j*step, inrow - j*step, step); } inrow += in ->linesize[plane]; outrow += out->linesize[plane]; } } av_frame_free(&in); return ff_filter_frame(outlink, out); } static const AVFilterPad avfilter_vf_hflip_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_props, }, { NULL } }; static const AVFilterPad avfilter_vf_hflip_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_hflip = { .name = "hflip", .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."), .priv_size = sizeof(FlipContext), .query_formats = query_formats, .inputs = avfilter_vf_hflip_inputs, .outputs = avfilter_vf_hflip_outputs, };
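/*
 * Illustrative sketch (not part of the filter): the core mirroring that
 * filter_frame() above performs for a one-byte-per-pixel plane (step == 1).
 * Output pixel j of each row is input pixel (width - 1 - j); the wider steps
 * in the filter do the same thing with 16-, 24- and 32-bit loads.  The
 * function name is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

static void hflip_plane8(uint8_t *dst, ptrdiff_t dst_linesize,
                         const uint8_t *src, ptrdiff_t src_linesize,
                         int width, int height)
{
    for (int i = 0; i < height; i++) {
        const uint8_t *inrow  = src + (ptrdiff_t)i * src_linesize + (width - 1);
        uint8_t       *outrow = dst + (ptrdiff_t)i * dst_linesize;
        for (int j = 0; j < width; j++)
            outrow[j] = inrow[-j]; /* walk the source row right-to-left */
    }
}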
/* ==== end of CrossVul record: ./CrossVul/dataset_final_sorted/CWE-119/c/bad_5733_5 ==== */

/* ==== CrossVul record crossvul-cpp_data_good_4999_0: Linux net/ipv4/netfilter/arp_tables.c ==== */
/* * Packet matching code for ARP packets. * * Based heavily, if not almost entirely, upon ip_tables.c framework. * * Some ARP specific bits are: * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/capability.h> #include <linux/if_arp.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/err.h> #include <net/compat.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp/arp_tables.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("arptables core"); /*#define DEBUG_ARP_TABLES*/ /*#define DEBUG_ARP_TABLES_USER*/ #ifdef DEBUG_ARP_TABLES #define dprintf(format, args...) pr_debug(format, ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_ARP_TABLES_USER #define duprintf(format, args...) pr_debug(format, ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define ARP_NF_ASSERT(x) WARN_ON(!(x)) #else #define ARP_NF_ASSERT(x) #endif void *arpt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(arpt, ARPT); } EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, const char *hdr_addr, int len) { int i, ret; if (len > ARPT_DEV_ADDR_LEN_MAX) len = ARPT_DEV_ADDR_LEN_MAX; ret = 0; for (i = 0; i < len; i++) ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; return ret != 0; } /* * Unfortunately, _b and _mask are not aligned to an int (or long int) * Some arches dont care, unrolling the loop is a win on them. * For other arches, we only have a 16bit alignement. */ static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS unsigned long ret = ifname_compare_aligned(_a, _b, _mask); #else unsigned long ret = 0; const u16 *a = (const u16 *)_a; const u16 *b = (const u16 *)_b; const u16 *mask = (const u16 *)_mask; int i; for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) ret |= (a[i] ^ b[i]) & mask[i]; #endif return ret; } /* Returns whether packet matches rule or not. 
*/ static inline int arp_packet_match(const struct arphdr *arphdr, struct net_device *dev, const char *indev, const char *outdev, const struct arpt_arp *arpinfo) { const char *arpptr = (char *)(arphdr + 1); const char *src_devaddr, *tgt_devaddr; __be32 src_ipaddr, tgt_ipaddr; long ret; #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, ARPT_INV_ARPOP)) { dprintf("ARP operation field mismatch.\n"); dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); return 0; } if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, ARPT_INV_ARPHRD)) { dprintf("ARP hardware address format mismatch.\n"); dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); return 0; } if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, ARPT_INV_ARPPRO)) { dprintf("ARP protocol address format mismatch.\n"); dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); return 0; } if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, ARPT_INV_ARPHLN)) { dprintf("ARP hardware address length mismatch.\n"); dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); return 0; } src_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&src_ipaddr, arpptr, sizeof(u32)); arpptr += sizeof(u32); tgt_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), ARPT_INV_SRCDEVADDR) || FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), ARPT_INV_TGTDEVADDR)) { dprintf("Source or target device address mismatch.\n"); return 0; } if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, ARPT_INV_SRCIP) || FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), ARPT_INV_TGTIP)) { dprintf("Source or target IP address mismatch.\n"); dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", &src_ipaddr, &arpinfo->smsk.s_addr, &arpinfo->src.s_addr, arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", &tgt_ipaddr, &arpinfo->tmsk.s_addr, &arpinfo->tgt.s_addr, arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); return 0; } /* Look for ifname matches. */ ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, arpinfo->iniface, arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : ""); return 0; } ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, arpinfo->outiface, arpinfo->invflags & ARPT_INV_VIA_OUT ? 
" (INV)" : ""); return 0; } return 1; #undef FWINV } static inline int arp_checkentry(const struct arpt_arp *arp) { if (arp->flags & ~ARPT_F_MASK) { duprintf("Unknown flag bits set: %08X\n", arp->flags & ~ARPT_F_MASK); return 0; } if (arp->invflags & ~ARPT_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", arp->invflags & ~ARPT_INV_MASK); return 0; } return 1; } static unsigned int arpt_error(struct sk_buff *skb, const struct xt_action_param *par) { net_err_ratelimited("arp_tables: error: '%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline const struct xt_entry_target * arpt_get_target_c(const struct arpt_entry *e) { return arpt_get_target((struct arpt_entry *)e); } static inline struct arpt_entry * get_entry(const void *base, unsigned int offset) { return (struct arpt_entry *)(base + offset); } static inline struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) { return (void *)entry + entry->next_offset; } unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); unsigned int verdict = NF_DROP; const struct arphdr *arp; struct arpt_entry *e, **jumpstack; const char *indev, *outdev; const void *table_base; unsigned int cpu, stackidx = 0; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) return NF_DROP; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); table_base = private->entries; jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; /* No TEE support for arptables, so no need to switch to alternate * stack. All targets that reenter must return absolute verdicts. */ e = get_entry(table_base, private->hook_entry[hook]); acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.hooknum = hook; acpar.family = NFPROTO_ARP; acpar.hotdrop = false; arp = arp_hdr(skb); do { const struct xt_entry_target *t; struct xt_counters *counter; if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { e = arpt_next_entry(e); continue; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1); t = arpt_get_target_c(e); /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); } else { e = jumpstack[--stackidx]; e = arpt_next_entry(e); } continue; } if (table_base + v != arpt_next_entry(e)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); /* Target might have changed stuff. */ arp = arp_hdr(skb); if (verdict == XT_CONTINUE) e = arpt_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; } /* All zeroes == unconditional rule. 
*/ static inline bool unconditional(const struct arpt_entry *e) { static const struct arpt_arp uncond; return e->target_offset == sizeof(struct arpt_entry) && memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; } /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset * to 0 as we leave), and comefrom to save source hook bitmask. */ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static inline int check_entry(const struct arpt_entry *e) { const struct xt_entry_target *t; if (!arp_checkentry(&e->arp)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = arpt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); int ret; struct xt_tgchk_param par = { .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_ARP, }; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); if (ret < 0) { duprintf("arp_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static inline int find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; ret = check_target(e, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); out: xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct arpt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(e)) return false; t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_debug("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static inline void cleanup_entry(struct arpt_entry *e) { struct xt_tgdtor_param par; struct xt_entry_target *t; t = arpt_get_target(e); par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_ARP; if 
(par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in * newinfo). */ static int translate_table(struct xt_table_info *newinfo, void *entry0, const struct arpt_replace *repl) { struct arpt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) break; ++i; if (strcmp(arpt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); if (ret != 0) return ret; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) { duprintf("Looping hook\n"); return -ELOOP; } /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct arpt_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care * about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct arpt_entry *e; struct xt_counters *counters; struct xt_table_info *private = table->private; int ret = 0; void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; /* ... then copy entire thing ... 
*/ if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ const struct xt_entry_target *t; e = (struct arpt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct arpt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } t = arpt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(NFPROTO_ARP, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } static int compat_calc_entry(const struct arpt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - base; t = arpt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct arpt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct arpt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct arpt_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(NFPROTO_ARP, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct arpt_getinfo)) { duprintf("length %u != %Zu\n", *len, sizeof(struct arpt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(NFPROTO_ARP); #endif t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), "arptable_%s", name); if (!IS_ERR_OR_NULL(t)) { struct arpt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, 
sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(NFPROTO_ARP); #endif return ret; } static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, const int *len) { int ret; struct arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct arpt_get_entries) + get.size) { duprintf("get_entries: %u != %Zu\n", *len, sizeof(struct arpt_get_entries) + get.size); return -EINVAL; } t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; void *loc_cpu_old_entry; struct arpt_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), "arptable_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries; xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) cleanup_entry(iter); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("arp_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct arpt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, NFPROTO_ARP, name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT static inline void compat_release_entry(struct compat_arpt_entry *e) { struct xt_entry_target *t; t = compat_arpt_get_target(e); module_put(t->u.kernel.target->me); } static inline int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; } static int compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct arpt_entry *de; unsigned int origsize; int ret, h; ret = 0; origsize = *size; de = (struct arpt_entry *)*dstptr; memcpy(de, e, sizeof(struct arpt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct arpt_entry); *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); de->target_offset = e->target_offset - (origsize - *size); t = compat_arpt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= 
origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int translate_compat_table(const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_arpt_entry *iter0; struct arpt_entry *iter1; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); xt_compat_init_offsets(NFPROTO_ARP, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { iter1->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(iter1->counters.pcnt)) { ret = -ENOMEM; break; } ret = check_target(iter1, name); if (ret != 0) { xt_percpu_counter_free(iter1->counters.pcnt); break; } ++i; if (strcmp(arpt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); goto out; } struct compat_arpt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_ARP_NUMHOOKS]; u32 underflow[NF_ARP_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; struct compat_arpt_entry entries[0]; }; static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_arpt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; int ret; origsize = *size; ce = (struct compat_arpt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_arpt_entry); *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); target_offset = e->target_offset - (origsize - *size); t = arpt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int 
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct arpt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } struct compat_arpt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_arpt_entry entrytable[0]; }; static int compat_get_entries(struct net *net, struct compat_arpt_get_entries __user *uptr, int *len) { int ret; struct compat_arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(NFPROTO_ARP); t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(NFPROTO_ARP); module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; xt_compat_unlock(NFPROTO_ARP); return ret; } static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case ARPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_arpt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case ARPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case ARPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), "arpt_%s", rev.name); break; } default: duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __arpt_unregister_table(struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct arpt_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(newinfo, loc_cpu_entry, repl); duprintf("arpt_register_table: translate table gives %d\n", ret); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __arpt_unregister_table(new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void arpt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __arpt_unregister_table(table); } /* The built-in targets: standard (NULL) and error. 
/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
    {
        .name             = XT_STANDARD_TARGET,
        .targetsize       = sizeof(int),
        .family           = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
        .compatsize       = sizeof(compat_int_t),
        .compat_from_user = compat_standard_from_user,
        .compat_to_user   = compat_standard_to_user,
#endif
    },
    {
        .name             = XT_ERROR_TARGET,
        .target           = arpt_error,
        .targetsize       = XT_FUNCTION_MAXNAMELEN,
        .family           = NFPROTO_ARP,
    },
};

static struct nf_sockopt_ops arpt_sockopts = {
    .pf          = PF_INET,
    .set_optmin  = ARPT_BASE_CTL,
    .set_optmax  = ARPT_SO_SET_MAX+1,
    .set         = do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
    .compat_set  = compat_do_arpt_set_ctl,
#endif
    .get_optmin  = ARPT_BASE_CTL,
    .get_optmax  = ARPT_SO_GET_MAX+1,
    .get         = do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
    .compat_get  = compat_do_arpt_get_ctl,
#endif
    .owner       = THIS_MODULE,
};

static int __net_init arp_tables_net_init(struct net *net)
{
    return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
    xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
    .init = arp_tables_net_init,
    .exit = arp_tables_net_exit,
};

static int __init arp_tables_init(void)
{
    int ret;

    ret = register_pernet_subsys(&arp_tables_net_ops);
    if (ret < 0)
        goto err1;

    /* No one else will be downing sem now, so we won't sleep */
    ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
    if (ret < 0)
        goto err2;

    /* Register setsockopt */
    ret = nf_register_sockopt(&arpt_sockopts);
    if (ret < 0)
        goto err4;

    pr_info("arp_tables: (C) 2002 David S. Miller\n");
    return 0;

err4:
    xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
    unregister_pernet_subsys(&arp_tables_net_ops);
err1:
    return ret;
}

static void __exit arp_tables_fini(void)
{
    nf_unregister_sockopt(&arpt_sockopts);
    xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
    unregister_pernet_subsys(&arp_tables_net_ops);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%               FFFFF   OOO   U   U  RRRR   IIIII  EEEEE  RRRR                %
%               F      O   O  U   U  R   R    I    E      R   R               %
%               FFF    O   O  U   U  RRRR     I    EEE    RRRR                %
%               F      O   O  U   U  R   R    I    E      R   R               %
%               F       OOO    UUU   R   R  IIIII  EEEEE  R   R               %
%                                                                             %
%                                                                             %
%                MagickCore Discrete Fourier Transform Methods                %
%                                                                             %
%                              Software Design                                %
%                                 Sean Burke                                  %
%                               Fred Weinhaus                                 %
%                                   Cristy                                    %
%                                 July 2009                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/fourier.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z)  (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z)  (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z)  (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z)  (z[0])
#endif
#endif

/*
  Typedef declarations.
*/
typedef struct _FourierInfo
{
  PixelChannel
    channel;

  MagickBooleanType
    modulus;

  size_t
    width,
    height;

  ssize_t
    center;
} FourierInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o m p l e x I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ComplexImages() performs complex mathematics on an image sequence.
%
%  The format of the ComplexImages method is:
%
%      Image *ComplexImages(const Image *images,const ComplexOperator op,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o op: A complex operator.
%
%    o exception: return any errors or warnings in this structure.
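%
%  For example (illustrative only; error handling elided, and image_list is
%  assumed to hold the real and imaginary component images):
%
%      complex_images=ComplexImages(image_list,MagnitudePhaseComplexOperator,
%        exception);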
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag  "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const Quantum
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register Quantum
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,
      MagickMax(Ar_image->columns,Cr_image->columns),1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,
      MagickMax(Ai_image->columns,Ci_image->columns),1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,
      MagickMax(Br_image->columns,Cr_image->columns),1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,
      MagickMax(Bi_image->columns,Ci_image->columns),1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
        (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
        (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(images); i++)
      {
        switch (op)
        {
          case AddComplexOperator:
          {
            Cr[i]=Ar[i]+Br[i];
            Ci[i]=Ai[i]+Bi[i];
            break;
          }
          case ConjugateComplexOperator:
          default:
          {
            Cr[i]=Ar[i];
            Ci[i]=(-Bi[i]);
            break;
          }
          case DivideComplexOperator:
          {
            double
              gamma;

            gamma=PerceptibleReciprocal(Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
            Cr[i]=gamma*(Ar[i]*Br[i]+Ai[i]*Bi[i]);
            Ci[i]=gamma*(Ai[i]*Br[i]-Ar[i]*Bi[i]);
            break;
          }
          case MagnitudePhaseComplexOperator:
          {
            Cr[i]=sqrt(Ar[i]*Ar[i]+Ai[i]*Ai[i]);
            Ci[i]=atan2(Ai[i],Ar[i])/(2.0*MagickPI)+0.5;
            break;
          }
          case MultiplyComplexOperator:
          {
            Cr[i]=QuantumScale*(Ar[i]*Br[i]-Ai[i]*Bi[i]);
            Ci[i]=QuantumScale*(Ai[i]*Br[i]+Ar[i]*Bi[i]);
            break;
          }
          case RealImaginaryComplexOperator:
          {
            Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
            Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
            break;
          }
          case SubtractComplexOperator:
          {
            Cr[i]=Ar[i]-Br[i];
            Ci[i]=Ai[i]-Bi[i];
            break;
          }
        }
      }
      Ar+=GetPixelChannels(Ar_image);
      Ai+=GetPixelChannels(Ai_image);
      Br+=GetPixelChannels(Br_image);
      Bi+=GetPixelChannels(Bi_image);
      Cr+=GetPixelChannels(Cr_image);
      Ci+=GetPixelChannels(Ci_image);
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F o r w a r d F o u r i e r T r a n s f o r m I m a g e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair,
%      otherwise as a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
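%
%  For example (illustrative only):
%
%      fourier_pair=ForwardFourierTransformImage(image,MagickTrue,exception);
%
%  which, when FFTW support is built in, returns a two-image list holding the
%  magnitude and phase components of the transform.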
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)

static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}

static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  register ssize_t
    x;

  ssize_t
    y;

  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier_pixels[y*width+x]*=(-1.0);
}

static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register Quantum
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(magnitude_image);
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(phase_image);
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}

static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(image,p);
          break;
        }
        case GreenPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(image,p);
          break;
        }
        case BluePixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(image,p);
          break;
        }
        case BlackPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlack(image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelAlpha(image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
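    Note: the real-to-complex transform above stores only
    fourier_info->center (width/2+1) complex coefficients per row, the
    non-redundant Hermitian half of the spectrum, which is why the x loops
    below run to center rather than width.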
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows :
        image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif

MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsImageGray(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayPixelChannel,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,
                    RedPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BluePixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlackPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->alpha_trait != UndefinedPixelTrait)
                  thread_status=ForwardFourierTransformChannel(image,
                    AlphaPixelChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I n v e r s e F o u r i e r T r a n s f o r m I m a g e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InverseFourierTransformImage() implements the inverse discrete Fourier
%  transform (DFT) of the image either as a magnitude / phase or real /
%  imaginary image pair.
%
%  The format of the InverseFourierTransformImage method is:
%
%      Image *InverseFourierTransformImage(const Image *magnitude_image,
%        const Image *phase_image,const MagickBooleanType modulus,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o magnitude_image: the magnitude or real image.
%
%    o phase_image: the phase or imaginary image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair,
%      otherwise as a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
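%
%  For example (illustrative only), reconstructing the spatial-domain image
%  from a magnitude / phase pair produced by ForwardFourierTransformImage():
%
%      image=InverseFourierTransformImage(magnitude,phase,MagickTrue,
%        exception);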
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}

static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p);
          break;
        }
        case GreenPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p);
          break;
        }
        case BluePixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p);
          break;
        }
        case BlackPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(magnitude_image);
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p);
          break;
        }
        case GreenPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p);
          break;
        }
        case BluePixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p);
          break;
        }
        case BlackPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p);
          break;
        }
        case AlphaPixelChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p);
          break;
        }
      }
      i++;
      p+=GetPixelChannels(phase_image);
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}

static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize inverse transform.
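        FFTW computes unnormalized transforms (a forward transform followed
        by an inverse scales the data by the number of samples), so divide
        by width*height here when the caller asked for normalization on the
        inverse pass rather than the default forward pass.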
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedPixelChannel:
          default:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q);
            break;
          }
          case GreenPixelChannel:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case BluePixelChannel:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case BlackPixelChannel:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
          case AlphaPixelChannel:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]),
              q);
            break;
          }
        }
      i++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const PixelChannel channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  fourier_info.width=magnitude_image->columns;
  fourier_info.height=magnitude_image->rows;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif

MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickCoreSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickTrue,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        is_gray=IsImageGray(magnitude_image);
        if (is_gray != MagickFalse)
          is_gray=IsImageGray(phase_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayPixelChannel,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BluePixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlackPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->alpha_trait != UndefinedPixelTrait)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,AlphaPixelChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
/*
 *  Copyright (C) 2007-2008, 2010 Sourcefire, Inc.
 *
 *  Authors: Nigel Horne, Török Edvin
 *
 *  Also based on Matt Olney's pdf parser in snort-nrt.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 *
 *  TODO: Embedded fonts
 *  TODO: Predictor image handling
 */
static char const rcsid[] = "$Id: pdf.c,v 1.61 2007/02/12 20:46:09 njh Exp $";

#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <string.h>
#include <fcntl.h>
#include <stdlib.h>
#include <errno.h>
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <zlib.h>

#include "clamav.h"
#include "others.h"
#include "pdf.h"
#include "scanners.h"
#include "fmap.h"
#include "str.h"
#include "bytecode.h"
#include "bytecode_api.h"
#include "md5.h"
#include "arc4.h"
#include "sha256.h"

#ifdef CL_DEBUG
/*#define SAVE_TMP * Save the file being worked on in tmp */
#endif

static int asciihexdecode(const char *buf, off_t len, char *output);
static int ascii85decode(const char *buf, off_t len, unsigned char *output);
static const char *pdf_nextlinestart(const char *ptr, size_t len);
static const char *pdf_nextobject(const char *ptr, size_t len);

#if 1
static int xrefCheck(const char *xref, const char *eof)
{
    const char *q;

    while (xref < eof && (*xref == ' ' || *xref == '\n' || *xref == '\r'))
        xref++;
    if (xref + 4 >= eof)
        return -1;
    if (!memcmp(xref, "xref", 4)) {
        cli_dbgmsg("cli_pdf: found xref\n");
        return 0;
    }
    /* could be xref stream */
    for (q=xref; q+5 < eof; q++) {
        if (!memcmp(q,"/XRef",4)) {
            cli_dbgmsg("cli_pdf: found /XRef\n");
            return 0;
        }
    }
    return -1;
}

struct pdf_struct {
    struct pdf_obj *objs;
    unsigned nobjs;
    unsigned flags;
    const char *map;
    off_t size;
    off_t offset;
    off_t startoff;
    cli_ctx *ctx;
    const char *dir;
    unsigned files;
    uint32_t enc_objid;
    char *fileID;
    unsigned fileIDlen;
    char *key;
    unsigned keylen;
};

static const char *findNextNonWSBack(const char *q, const char *start)
{
    while (q > start &&
           (*q == 0 || *q == 9 || *q == 0xa || *q == 0xc || *q == 0xd ||
            *q == 0x20))
    {
        q--;
    }
    return q;
}

static int find_stream_bounds(const char *start, off_t bytesleft,
                              off_t bytesleft2, off_t *stream,
                              off_t *endstream)
{
    const char *q2, *q;

    if ((q2 = cli_memstr(start, bytesleft, "stream", 6))) {
        q2 += 6;
        bytesleft -= q2 - start;
        if (bytesleft < 0)
            return 0;
        if (bytesleft >= 2 && q2[0] == '\xd' && q2[1] == '\xa')
            q2 += 2;
        if (q2[0] == '\xa')
            q2++;
        *stream = q2 - start;
        bytesleft2 -= q2 - start;
        if (bytesleft2 <= 0)
            return 0;
        q = q2;
        q2 = cli_memstr(q, bytesleft2, "endstream", 9);
        if (!q2)
            q2 = q + bytesleft2-9; /* till EOF */
        *endstream = q2 - start;
        if (*endstream < *stream)
            *endstream = *stream;
        return 1;
    }
    return 0;
}
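/* Locate the next "N G obj" header in the mapped file (for example
 * "12 0 obj"), record its object id, generation id and start offset, and
 * advance pdf->offset past the matching endobj or stream so that repeated
 * calls walk every object in the file. */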
static int pdf_findobj(struct pdf_struct *pdf)
{
    const char *start, *q, *q2, *q3, *eof;
    struct pdf_obj *obj;
    off_t bytesleft;
    unsigned genid, objid;

    pdf->nobjs++;
    pdf->objs = cli_realloc2(pdf->objs, sizeof(*pdf->objs)*pdf->nobjs);
    if (!pdf->objs) {
        cli_warnmsg("cli_pdf: out of memory parsing objects (%u)\n",
                    pdf->nobjs);
        return -1;
    }
    obj = &pdf->objs[pdf->nobjs-1];
    memset(obj, 0, sizeof(*obj));
    start = pdf->map+pdf->offset;
    bytesleft = pdf->size - pdf->offset;
    while (bytesleft > 0) {
        q2 = cli_memstr(start, bytesleft, "obj", 3);
        if (!q2)
            return 0; /* no more objs */
        q2--;
        bytesleft -= q2 - start;
        if (*q2 != 0 && *q2 != 9 && *q2 != 0xa && *q2 != 0xc &&
            *q2 != 0xd && *q2 != 0x20)
        {
            start = q2+4;
            bytesleft -= 4;
            continue;
        }
        break;
    }
    if (bytesleft <= 0)
        return 0;
    q = findNextNonWSBack(q2-1, start);
    while (q > start && isdigit(*q)) {
        q--;
    }
    genid = atoi(q);
    q = findNextNonWSBack(q-1,start);
    while (q > start && isdigit(*q)) {
        q--;
    }
    objid = atoi(q);
    obj->id = (objid << 8) | (genid&0xff);
    obj->start = q2+4 - pdf->map;
    obj->flags = 0;
    bytesleft -= 4;
    eof = pdf->map + pdf->size;
    q = pdf->map + obj->start;
    while (q < eof && bytesleft > 0) {
        off_t p_stream, p_endstream;
        q2 = pdf_nextobject(q, bytesleft);
        if (!q2)
            q2 = pdf->map + pdf->size;
        bytesleft -= q2 - q;
        if (find_stream_bounds(q-1, q2-q, bytesleft + (q2-q),
                               &p_stream, &p_endstream)) {
            obj->flags |= 1 << OBJ_STREAM;
            q2 = q-1 + p_endstream + 9;
            bytesleft -= q2 - q + 1;
            if (bytesleft < 0) {
                obj->flags |= 1 << OBJ_TRUNCATED;
                pdf->offset = pdf->size;
                return 1; /* truncated */
            }
        } else if ((q3 = cli_memstr(q-1, q2-q+1, "endobj", 6))) {
            q2 = q3 + 6;
            pdf->offset = q2 - pdf->map;
            return 1; /* obj found and offset positioned */
        } else {
            q2++;
            bytesleft--;
        }
        q = q2;
    }
    obj->flags |= 1 << OBJ_TRUNCATED;
    pdf->offset = pdf->size;
    return 1; /* truncated */
}

static int filter_writen(struct pdf_struct *pdf, struct pdf_obj *obj,
                         int fout, const char *buf, off_t len, off_t *sum)
{
    if (cli_checklimits("pdf", pdf->ctx, *sum, 0, 0))
        return len; /* pretend it was a successful write to suppress CL_EWRITE */
    *sum += len;
    return cli_writen(fout, buf, len);
}

static void pdfobj_flag(struct pdf_struct *pdf, struct pdf_obj *obj,
                        enum pdf_flag flag)
{
    const char *s = "";

    pdf->flags |= 1 << flag;
    if (!cli_debug_flag)
        return;
    switch (flag) {
    case UNTERMINATED_OBJ_DICT:
        s = "dictionary not terminated";
        break;
    case ESCAPED_COMMON_PDFNAME:
        /* like /JavaScript */
        s = "escaped common pdfname";
        break;
    case BAD_STREAM_FILTERS:
        s = "duplicate stream filters";
        break;
    case BAD_PDF_VERSION:
        s = "bad pdf version";
        break;
    case BAD_PDF_HEADERPOS:
        s = "bad pdf header position";
        break;
    case BAD_PDF_TRAILER:
        s = "bad pdf trailer";
        break;
    case BAD_PDF_TOOMANYOBJS:
        s = "too many pdf objs";
        break;
    case BAD_FLATE:
        s = "bad deflate stream";
        break;
    case BAD_FLATESTART:
        s = "bad deflate stream start";
        break;
    case BAD_STREAMSTART:
        s = "bad stream start";
        break;
    case UNKNOWN_FILTER:
        s = "unknown filter used";
        break;
    case BAD_ASCIIDECODE:
        s = "bad ASCII decode";
        break;
    case HEX_JAVASCRIPT:
        s = "hex javascript";
        break;
    case BAD_INDOBJ:
        s = "referencing nonexistent obj";
        break;
    case HAS_OPENACTION:
        s = "has /OpenAction";
        break;
    case HAS_LAUNCHACTION:
        s = "has /LaunchAction";
        break;
    case BAD_STREAMLEN:
        s = "bad /Length, too small";
        break;
    case ENCRYPTED_PDF:
        s = "PDF is encrypted";
        break;
    case LINEARIZED_PDF:
        s = "linearized PDF";
        break;
    case MANY_FILTERS:
        s = "more than 2 filters per obj";
        break;
    case DECRYPTABLE_PDF:
        s = "decryptable PDF";
        break;
    }
    cli_dbgmsg("cli_pdf: %s flagged in object %u %u\n", s,
               obj->id>>8, obj->id&0xff);
}

static int filter_flatedecode(struct pdf_struct *pdf, struct pdf_obj *obj,
                              const char *buf, off_t len, int fout, off_t *sum)
{
    int skipped = 0;
    int zstat;
    z_stream stream;
    off_t nbytes;
    char output[BUFSIZ];

    if (len == 0)
        return CL_CLEAN;

    if (*buf == '\r') {
        buf++;
        len--;
        pdfobj_flag(pdf, obj, BAD_STREAMSTART);
        /* PDF spec says stream is followed by \r\n or \n, but not \r alone.
         * Sample 0015315109, it has \r followed by zlib header.
         * Flag pdf as suspicious, and attempt to extract by skipping the \r. */
        if (!len)
            return CL_CLEAN;
    }

    memset(&stream, 0, sizeof(stream));
    stream.next_in = (Bytef *)buf;
    stream.avail_in = len;
    stream.next_out = (Bytef *)output;
    stream.avail_out = sizeof(output);

    zstat = inflateInit(&stream);
    if(zstat != Z_OK) {
        cli_warnmsg("cli_pdf: inflateInit failed\n");
        return CL_EMEM;
    }
    nbytes = 0;
    while(stream.avail_in) {
        int written;
        zstat = inflate(&stream, Z_NO_FLUSH); /* zlib */
        switch(zstat) {
        case Z_OK:
            if(stream.avail_out == 0) {
                if ((written=filter_writen(pdf, obj, fout, output,
                                           sizeof(output), sum))!=sizeof(output)) {
                    cli_errmsg("cli_pdf: failed to write output file\n");
                    inflateEnd(&stream);
                    return CL_EWRITE;
                }
                nbytes += written;
                stream.next_out = (Bytef *)output;
                stream.avail_out = sizeof(output);
            }
            continue;
        case Z_STREAM_END:
        default:
            written = sizeof(output) - stream.avail_out;
            if (!written && !nbytes && !skipped) {
                /* skip till EOL, and try inflating from there, sometimes
                 * PDFs contain extra whitespace */
                const char *q = pdf_nextlinestart(buf, len);
                if (q) {
                    skipped = 1;
                    inflateEnd(&stream);
                    len -= q - buf;
                    buf = q;
                    stream.next_in = (Bytef *)buf;
                    stream.avail_in = len;
                    stream.next_out = (Bytef *)output;
                    stream.avail_out = sizeof(output);
                    zstat = inflateInit(&stream);
                    if(zstat != Z_OK) {
                        cli_warnmsg("cli_pdf: inflateInit failed\n");
                        return CL_EMEM;
                    }
                    pdfobj_flag(pdf, obj, BAD_FLATESTART);
                    continue;
                }
            }
            if (filter_writen(pdf, obj, fout, output, written, sum)!=written) {
                cli_errmsg("cli_pdf: failed to write output file\n");
                inflateEnd(&stream);
                return CL_EWRITE;
            }
            nbytes += written;
            stream.next_out = (Bytef *)output;
            stream.avail_out = sizeof(output);
            if (zstat == Z_STREAM_END)
                break;
            if(stream.msg)
                cli_dbgmsg("cli_pdf: after writing %lu bytes, got error \"%s\" inflating PDF stream in %u %u obj\n",
                           (unsigned long)nbytes, stream.msg,
                           obj->id>>8, obj->id&0xff);
            else
                cli_dbgmsg("cli_pdf: after writing %lu bytes, got error %d inflating PDF stream in %u %u obj\n",
                           (unsigned long)nbytes, zstat,
                           obj->id>>8, obj->id&0xff);
            /* mark stream as bad only if not encrypted */
            inflateEnd(&stream);
            if (!nbytes) {
                cli_dbgmsg("cli_pdf: dumping raw stream (probably encrypted)\n");
                if (filter_writen(pdf, obj, fout, buf, len, sum) != len) {
                    cli_errmsg("cli_pdf: failed to write output file\n");
                    return CL_EWRITE;
                }
                pdfobj_flag(pdf, obj, BAD_FLATESTART);
            } else {
                pdfobj_flag(pdf, obj, BAD_FLATE);
            }
            return CL_CLEAN;
        }
        break;
    }

    if(stream.avail_out != sizeof(output)) {
        if(filter_writen(pdf, obj, fout, output,
                         sizeof(output) - stream.avail_out, sum) < 0) {
            cli_errmsg("cli_pdf: failed to write output file\n");
            inflateEnd(&stream);
            return CL_EWRITE;
        }
    }
    inflateEnd(&stream);
    return CL_CLEAN;
}

static struct pdf_obj *find_obj(struct pdf_struct *pdf, struct pdf_obj *obj,
                                uint32_t objid)
{
    unsigned j;
    unsigned i;

    /* search starting at previous obj (if exists) */
    if (obj != pdf->objs)
        i = obj - pdf->objs;
    else
        i = 0;
    for (j=i;j<pdf->nobjs;j++) {
        obj = &pdf->objs[j];
        if (obj->id == objid)
            return obj;
    }
    /* restart search from beginning if not found */
    for (j=0;j<i;j++) {
        obj = &pdf->objs[j];
        if (obj->id == objid)
            return obj;
    }
    return NULL;
}
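/* A stream's /Length value may appear inline or as an indirect reference
 * (for example "/Length 5 0 R", pointing at object 5, generation 0); the
 * helper below resolves both forms and clamps the result to the size of
 * the mapped file. */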
static int find_length(struct pdf_struct *pdf, struct pdf_obj *obj,
                       const char *start, off_t len)
{
    int length;
    const char *q;

    q = cli_memstr(start, len, "/Length", 7);
    if (!q)
        return 0;
    q++;
    len -= q - start;
    start = pdf_nextobject(q, len);
    if (!start)
        return 0;
    /* len -= start - q; */
    q = start;
    length = atoi(q);
    while (isdigit(*q))
        q++;
    if (*q == ' ') {
        int genid;
        q++;
        genid = atoi(q);
        while(isdigit(*q))
            q++;
        if (q[0] == ' ' && q[1] == 'R') {
            cli_dbgmsg("cli_pdf: length is in indirect object %u %u\n",
                       length, genid);
            obj = find_obj(pdf, obj, (length << 8) | (genid&0xff));
            if (!obj) {
                cli_dbgmsg("cli_pdf: indirect object not found\n");
                return 0;
            }
            q = pdf_nextobject(pdf->map+obj->start, pdf->size - obj->start);
            if (!q) {
                cli_dbgmsg("cli_pdf: next object not found\n");
                return 0;
            }
            length = atoi(q);
        }
    }
    /* limit length */
    if (start - pdf->map + length+5 > pdf->size) {
        length = pdf->size - (start - pdf->map)-5;
    }
    return length;
}

#define DUMP_MASK ((1 << OBJ_FILTER_FLATE) | (1 << OBJ_FILTER_DCT) | (1 << OBJ_FILTER_AH) | (1 << OBJ_FILTER_A85) | (1 << OBJ_EMBEDDED_FILE) | (1 << OBJ_JAVASCRIPT) | (1 << OBJ_OPENACTION) | (1 << OBJ_LAUNCHACTION))

static int obj_size(struct pdf_struct *pdf, struct pdf_obj *obj, int binary)
{
    unsigned i = obj - pdf->objs;

    i++;
    if (i < pdf->nobjs) {
        int s = pdf->objs[i].start - obj->start - 4;
        if (s > 0) {
            if (!binary) {
                const char *p = pdf->map + obj->start;
                const char *q = p + s;
                while (q > p && (isspace(*q) || isdigit(*q)))
                    q--;
                if (q > p+5 && !memcmp(q-5,"endobj",6))
                    q -= 6;
                q = findNextNonWSBack(q, p);
                q++;
                return q - p;
            }
            return s;
        }
    }
    if (binary)
        return pdf->size - obj->start;
    return pdf->offset - obj->start - 6;
}

static int run_pdf_hooks(struct pdf_struct *pdf, enum pdf_phase phase,
                         int fd, int dumpid)
{
    int ret;
    struct cli_bc_ctx *bc_ctx;
    cli_ctx *ctx = pdf->ctx;
    fmap_t *map;

    bc_ctx = cli_bytecode_context_alloc();
    if (!bc_ctx) {
        cli_errmsg("cli_pdf: can't allocate memory for bc_ctx");
        return CL_EMEM;
    }
    map = *ctx->fmap;
    if (fd != -1) {
        map = fmap(fd, 0, 0);
        if (!map) {
            cli_warnmsg("can't mmap pdf extracted obj\n");
            map = *ctx->fmap;
            fd = -1;
        }
    }
    cli_bytecode_context_setpdf(bc_ctx, phase, pdf->nobjs, pdf->objs,
                                &pdf->flags, pdf->size, pdf->startoff);
    cli_bytecode_context_setctx(bc_ctx, ctx);
    ret = cli_bytecode_runhook(ctx, ctx->engine, bc_ctx, BC_PDF, map);
    cli_bytecode_context_destroy(bc_ctx);
    if (fd != -1) {
        funmap(map);
    }
    return ret;
}

static int pdf_extract_obj(struct pdf_struct *pdf, struct pdf_obj *obj)
{
    char fullname[NAME_MAX + 1];
    int fout;
    off_t sum = 0;
    int rc = CL_SUCCESS;
    char *ascii_decoded = NULL;
    int dump = 1;

    cli_dbgmsg("pdf_extract_obj: obj %u %u\n", obj->id>>8, obj->id&0xff);
    /* TODO: call bytecode hook here, allow override dumpability */
    if ((!(obj->flags & (1 << OBJ_STREAM)) ||
         (obj->flags & (1 << OBJ_HASFILTERS))) &&
        !(obj->flags & DUMP_MASK))
    {
        /* don't dump all streams */
        dump = 0;
    }
    if ((obj->flags & (1 << OBJ_IMAGE)) &&
        !(obj->flags & (1 << OBJ_FILTER_DCT)))
    {
        /* don't dump / scan non-JPG images */
        dump = 0;
    }
    if (obj->flags & (1 << OBJ_FORCEDUMP)) {
        /* bytecode can force dump by setting this flag */
        dump = 1;
    }
    if (!dump)
        return CL_CLEAN;
    cli_dbgmsg("cli_pdf: dumping obj %u %u\n", obj->id>>8, obj->id&0xff);
    snprintf(fullname, sizeof(fullname), "%s"PATHSEP"pdf%02u",
             pdf->dir, pdf->files++);
    fout = open(fullname,O_RDWR|O_CREAT|O_EXCL|O_TRUNC|O_BINARY, 0600);
    if (fout < 0) {
        char err[128];
        cli_errmsg("cli_pdf: can't create temporary file %s: %s\n", fullname,
                   cli_strerror(errno, err, sizeof(err)));
        free(ascii_decoded);
        return CL_ETMPFILE;
    }

    do {
        if (obj->flags & (1 << OBJ_STREAM)) {
            const char *start = pdf->map + obj->start;
            off_t p_stream = 0, p_endstream = 0;
            off_t length;

            find_stream_bounds(start, pdf->size - obj->start,
                               pdf->size - obj->start,
                               &p_stream, &p_endstream);
            if (p_stream && p_endstream) {
                const char *flate_in;
                long ascii_decoded_size = 0;
                size_t size = p_endstream - p_stream;
                off_t orig_length;

                length = find_length(pdf, obj, start, p_stream);
                if (length < 0)
                    length = 0;
                orig_length = length;
                if (length > pdf->size ||
                    obj->start + p_stream + length > pdf->size) {
                    cli_dbgmsg("cli_pdf: length out of file: %ld + %ld > %ld\n",
                               p_stream, length, pdf->size);
                    length = pdf->size - (obj->start + p_stream);
                }
                if (!(obj->flags & (1 << OBJ_FILTER_FLATE)) && length <= 0) {
                    const char *q = start + p_endstream;
                    length = size;
                    q--;
                    if (*q == '\n') {
                        q--;
                        length--;
                        if (*q == '\r')
                            length--;
                    } else if (*q == '\r') {
                        length--;
                    }
                    if (length < 0)
                        length = 0;
                    cli_dbgmsg("cli_pdf: calculated length %ld\n", length);
                } else {
                    if (size > length+2) {
                        cli_dbgmsg("cli_pdf: calculated length %ld < %ld\n",
                                   length, size);
                        length = size;
                    }
                }
                if (orig_length && size > orig_length + 20) {
                    cli_dbgmsg("cli_pdf: orig length: %ld, length: %ld, size: %ld\n",
                               orig_length, length, size);
                    pdfobj_flag(pdf, obj, BAD_STREAMLEN);
                }
                if (!length)
                    length = size;

                if (obj->flags & (1 << OBJ_FILTER_AH)) {
                    ascii_decoded = cli_malloc(length/2 + 1);
                    if (!ascii_decoded) {
                        cli_errmsg("Cannot allocate memory for asciidecode\n");
                        rc = CL_EMEM;
                        break;
                    }
                    ascii_decoded_size = asciihexdecode(start + p_stream,
                                                        length,
                                                        ascii_decoded);
                } else if (obj->flags & (1 << OBJ_FILTER_A85)) {
                    ascii_decoded = cli_malloc(length*5);
                    if (!ascii_decoded) {
                        cli_errmsg("Cannot allocate memory for asciidecode\n");
                        rc = CL_EMEM;
                        break;
                    }
                    ascii_decoded_size = ascii85decode(start+p_stream,
                                                       length,
                                                       (unsigned char*)ascii_decoded);
                }
                if (ascii_decoded_size < 0) {
                    /* don't flag for images or truncated objs */
                    if (!(obj->flags &
                          ((1 << OBJ_IMAGE) | (1 << OBJ_TRUNCATED))))
                        pdfobj_flag(pdf, obj, BAD_ASCIIDECODE);
                    cli_dbgmsg("cli_pdf: failed to asciidecode in %u %u obj\n",
                               obj->id>>8,obj->id&0xff);
                    free(ascii_decoded);
                    ascii_decoded = NULL;
                    /* attempt to directly flatedecode it */
                }
                /* either direct or ascii-decoded input */
                if (!ascii_decoded)
                    ascii_decoded_size = length;
                flate_in = ascii_decoded ? ascii_decoded : start+p_stream;
ascii_decoded : start+p_stream; if (obj->flags & (1 << OBJ_FILTER_FLATE)) { cli_dbgmsg("cli_pdf: deflate len %ld (orig %ld)\n", ascii_decoded_size, (long)orig_length); rc = filter_flatedecode(pdf, obj, flate_in, ascii_decoded_size, fout, &sum); } else { if (filter_writen(pdf, obj, fout, flate_in, ascii_decoded_size, &sum) != ascii_decoded_size) rc = CL_EWRITE; } } } else if (obj->flags & (1 << OBJ_JAVASCRIPT)) { const char *q2; const char *q = pdf->map+obj->start; /* TODO: get obj-endobj size */ off_t bytesleft = obj_size(pdf, obj, 0); if (bytesleft < 0) break; q2 = cli_memstr(q, bytesleft, "/JavaScript", 11); if (!q2) break; bytesleft -= q2 - q; do { q2++; bytesleft--; q = pdf_nextobject(q2, bytesleft); if (!q) break; bytesleft -= q - q2; q2 = q; } while (*q == '/'); if (!q) break; if (*q == '(') { if (filter_writen(pdf, obj, fout, q+1, bytesleft-1, &sum) != (bytesleft-1)) { rc = CL_EWRITE; break; } } else if (*q == '<') { char *decoded; q2 = memchr(q+1, '>', bytesleft); if (!q2) q2 = q + bytesleft; decoded = cli_malloc(q2 - q); if (!decoded) { rc = CL_EMEM; break; } cli_hex2str_to(q2, decoded, q2-q-1); decoded[q2-q-1] = '\0'; cli_dbgmsg("cli_pdf: found hexadecimal encoded javascript in %u %u obj\n", obj->id>>8, obj->id&0xff); pdfobj_flag(pdf, obj, HEX_JAVASCRIPT); filter_writen(pdf, obj, fout, decoded, q2-q-1, &sum); free(decoded); } } else { off_t bytesleft = obj_size(pdf, obj, 0); if (filter_writen(pdf, obj, fout , pdf->map + obj->start, bytesleft,&sum) != bytesleft) rc = CL_EWRITE; } } while (0); cli_dbgmsg("cli_pdf: extracted %ld bytes %u %u obj to %s\n", sum, obj->id>>8, obj->id&0xff, fullname); if (sum) { int rc2; cli_updatelimits(pdf->ctx, sum); /* TODO: invoke bytecode on this pdf obj with metainformation associated * */ lseek(fout, 0, SEEK_SET); rc2 = cli_magic_scandesc(fout, pdf->ctx); if (rc2 == CL_VIRUS || rc == CL_SUCCESS) rc = rc2; if (rc == CL_CLEAN) { rc2 = run_pdf_hooks(pdf, PDF_PHASE_POSTDUMP, fout, obj - pdf->objs); if (rc2 == CL_VIRUS) rc = rc2; } } close(fout); free(ascii_decoded); if (!pdf->ctx->engine->keeptmp) if (cli_unlink(fullname) && rc != CL_VIRUS) rc = CL_EUNLINK; return rc; } enum objstate { STATE_NONE, STATE_S, STATE_FILTER, STATE_JAVASCRIPT, STATE_OPENACTION, STATE_LINEARIZED, STATE_LAUNCHACTION, STATE_ANY /* for actions table below */ }; struct pdfname_action { const char *pdfname; enum pdf_objflags set_objflag;/* OBJ_DICT is noop */ enum objstate from_state;/* STATE_NONE is noop */ enum objstate to_state; }; static struct pdfname_action pdfname_actions[] = { {"ASCIIHexDecode", OBJ_FILTER_AH, STATE_FILTER, STATE_FILTER}, {"ASCII85Decode", OBJ_FILTER_A85, STATE_FILTER, STATE_FILTER}, {"A85", OBJ_FILTER_A85, STATE_FILTER, STATE_FILTER}, {"AHx", OBJ_FILTER_AH, STATE_FILTER, STATE_FILTER}, {"EmbeddedFile", OBJ_EMBEDDED_FILE, STATE_NONE, STATE_NONE}, {"FlateDecode", OBJ_FILTER_FLATE, STATE_FILTER, STATE_FILTER}, {"Fl", OBJ_FILTER_FLATE, STATE_FILTER, STATE_FILTER}, {"Image", OBJ_IMAGE, STATE_NONE, STATE_NONE}, {"LZWDecode", OBJ_FILTER_LZW, STATE_FILTER, STATE_FILTER}, {"LZW", OBJ_FILTER_LZW, STATE_FILTER, STATE_FILTER}, {"RunLengthDecode", OBJ_FILTER_RL, STATE_FILTER, STATE_FILTER}, {"RL", OBJ_FILTER_RL, STATE_FILTER, STATE_FILTER}, {"CCITTFaxDecode", OBJ_FILTER_FAX, STATE_FILTER, STATE_FILTER}, {"CCF", OBJ_FILTER_FAX, STATE_FILTER, STATE_FILTER}, {"JBIG2Decode", OBJ_FILTER_DCT, STATE_FILTER, STATE_FILTER}, {"DCTDecode", OBJ_FILTER_DCT, STATE_FILTER, STATE_FILTER}, {"DCT", OBJ_FILTER_DCT, STATE_FILTER, STATE_FILTER}, {"JPXDecode", OBJ_FILTER_JPX, 
STATE_FILTER, STATE_FILTER}, {"Crypt", OBJ_FILTER_CRYPT, STATE_FILTER, STATE_NONE}, {"Standard", OBJ_FILTER_STANDARD, STATE_FILTER, STATE_FILTER}, {"Sig", OBJ_SIGNED, STATE_ANY, STATE_NONE}, {"V", OBJ_SIGNED, STATE_ANY, STATE_NONE}, {"R", OBJ_SIGNED, STATE_ANY, STATE_NONE}, {"Linearized", OBJ_DICT, STATE_NONE, STATE_LINEARIZED}, {"Filter", OBJ_HASFILTERS, STATE_ANY, STATE_FILTER}, {"JavaScript", OBJ_JAVASCRIPT, STATE_S, STATE_JAVASCRIPT}, {"Length", OBJ_DICT, STATE_FILTER, STATE_NONE}, {"S", OBJ_DICT, STATE_NONE, STATE_S}, {"Type", OBJ_DICT, STATE_NONE, STATE_NONE}, {"OpenAction", OBJ_OPENACTION, STATE_ANY, STATE_OPENACTION}, {"Launch", OBJ_LAUNCHACTION, STATE_ANY, STATE_LAUNCHACTION} }; #define KNOWN_FILTERS ((1 << OBJ_FILTER_AH) | (1 << OBJ_FILTER_RL) | (1 << OBJ_FILTER_A85) | (1 << OBJ_FILTER_FLATE) | (1 << OBJ_FILTER_LZW) | (1 << OBJ_FILTER_FAX) | (1 << OBJ_FILTER_DCT) | (1 << OBJ_FILTER_JPX) | (1 << OBJ_FILTER_CRYPT)) static void handle_pdfname(struct pdf_struct *pdf, struct pdf_obj *obj, const char *pdfname, int escapes, enum objstate *state) { struct pdfname_action *act = NULL; unsigned j; for (j=0;j<sizeof(pdfname_actions)/sizeof(pdfname_actions[0]);j++) { if (!strcmp(pdfname, pdfname_actions[j].pdfname)) { act = &pdfname_actions[j]; break; } } if (!act) { if (*state == STATE_FILTER && !(obj->flags & (1 << OBJ_SIGNED)) && /* these are digital signature objects, filter doesn't matter, * we don't need them anyway */ !(obj->flags & KNOWN_FILTERS)) { cli_dbgmsg("cli_pdf: unknown filter %s\n", pdfname); obj->flags |= 1 << OBJ_FILTER_UNKNOWN; } return; } if (escapes) { /* if a commonly used PDF name is escaped that is certainly suspicious. */ cli_dbgmsg("cli_pdf: pdfname %s is escaped\n", pdfname); pdfobj_flag(pdf, obj, ESCAPED_COMMON_PDFNAME); } if (act->from_state == *state || act->from_state == STATE_ANY) { *state = act->to_state; if (*state == STATE_FILTER && act->set_objflag !=OBJ_DICT && (obj->flags & (1 << act->set_objflag))) { cli_dbgmsg("cli_pdf: duplicate stream filter %s\n", pdfname); pdfobj_flag(pdf, obj, BAD_STREAM_FILTERS); } obj->flags |= 1 << act->set_objflag; } else { /* auto-reset states */ switch (*state) { case STATE_S: *state = STATE_NONE; break; default: break; } } } static char *pdf_readstring(const char *q0, int len, const char *key, unsigned *slen); static int pdf_readint(const char *q0, int len, const char *key); static const char *pdf_getdict(const char *q0, int* len, const char *key); static void pdf_parse_trailer(struct pdf_struct *pdf, const char *s, long length) { char *newID; newID = pdf_readstring(s, length, "/ID", &pdf->fileIDlen); if (newID) { free(pdf->fileID); pdf->fileID = newID; } } static void pdf_parseobj(struct pdf_struct *pdf, struct pdf_obj *obj) { /* enough to hold common pdf names, we don't need all the names */ char pdfname[64]; const char *q2, *q3; const char *q = obj->start + pdf->map; const char *dict, *start; off_t dict_length; off_t bytesleft = obj_size(pdf, obj, 1); unsigned i, filters=0; enum objstate objstate = STATE_NONE; if (bytesleft < 0) return; start = q; /* find start of dictionary */ do { q2 = pdf_nextobject(q, bytesleft); bytesleft -= q2 -q; if (!q2 || bytesleft < 0) { cli_dbgmsg("cli_pdf: %u %u obj: no dictionary\n", obj->id>>8, obj->id&0xff); return; } q3 = memchr(q-1, '<', q2-q+1); q2++; bytesleft--; q = q2; } while (!q3 || q3[1] != '<'); dict = q3+2; q = dict; bytesleft = obj_size(pdf, obj, 1) - (q - start); /* find end of dictionary */ do { q2 = pdf_nextobject(q, bytesleft); bytesleft -= q2 -q; if (!q2 || bytesleft < 0) 
{ cli_dbgmsg("cli_pdf: %u %u obj: broken dictionary\n", obj->id>>8, obj->id&0xff); return; } q3 = memchr(q-1, '>', q2-q+1); q2++; bytesleft--; q = q2; } while (!q3 || q3[1] != '>'); obj->flags |= 1 << OBJ_DICT; dict_length = q3 - dict; /* process pdf names */ for (q = dict;dict_length > 0;) { int escapes = 0; q2 = memchr(q, '/', dict_length); if (!q2) break; dict_length -= q2 - q; q = q2; /* normalize PDF names */ for (i = 0;dict_length > 0 && (i < sizeof(pdfname)-1); i++) { q++; dict_length--; if (*q == '#') { if (cli_hex2str_to(q+1, pdfname+i, 2) == -1) break; q += 2; dict_length -= 2; escapes = 1; continue; } if (*q == ' ' || *q == '\t' || *q == '\r' || *q == '\n' || *q == '/' || *q == '>' || *q == ']' || *q == '[' || *q == '<' || *q == '(') break; pdfname[i] = *q; } pdfname[i] = '\0'; handle_pdfname(pdf, obj, pdfname, escapes, &objstate); if (objstate == STATE_LINEARIZED) { long trailer_end, trailer; pdfobj_flag(pdf, obj, LINEARIZED_PDF); objstate = STATE_NONE; trailer_end = pdf_readint(q, dict_length, "/H"); if (trailer_end > 0 && trailer_end < pdf->size) { trailer = trailer_end - 1024; if (trailer < 0) trailer = 0; q2 = pdf->map + trailer; cli_dbgmsg("cli_pdf: looking for trailer in linearized pdf: %ld - %ld\n", trailer, trailer_end); pdf_parse_trailer(pdf, q2, trailer_end - trailer); if (pdf->fileID) cli_dbgmsg("cli_pdf: found fileID\n"); } } if (objstate == STATE_LAUNCHACTION) pdfobj_flag(pdf, obj, HAS_LAUNCHACTION); if (dict_length > 0 && (objstate == STATE_JAVASCRIPT || objstate == STATE_OPENACTION)) { if (objstate == STATE_OPENACTION) pdfobj_flag(pdf, obj, HAS_OPENACTION); q2 = pdf_nextobject(q, dict_length); if (q2 && isdigit(*q2)) { uint32_t objid = atoi(q2) << 8; while (isdigit(*q2)) q2++; q2 = pdf_nextobject(q2, dict_length); if (q2 && isdigit(*q2)) { objid |= atoi(q2) & 0xff; q2 = pdf_nextobject(q2, dict_length); if (q2 && *q2 == 'R') { struct pdf_obj *obj2; cli_dbgmsg("cli_pdf: found %s stored in indirect object %u %u\n", pdfname, objid >> 8, objid&0xff); obj2 = find_obj(pdf, obj, objid); if (obj2) { enum pdf_objflags flag = objstate == STATE_JAVASCRIPT ? 
OBJ_JAVASCRIPT : OBJ_OPENACTION; obj2->flags |= 1 << flag; obj->flags &= ~(1 << flag); } else { pdfobj_flag(pdf, obj, BAD_INDOBJ); } } } } objstate = STATE_NONE; } } for (i=0;i<sizeof(pdfname_actions)/sizeof(pdfname_actions[0]);i++) { const struct pdfname_action *act = &pdfname_actions[i]; if ((obj->flags & (1 << act->set_objflag)) && act->from_state == STATE_FILTER && act->to_state == STATE_FILTER && act->set_objflag != OBJ_FILTER_CRYPT && act->set_objflag != OBJ_FILTER_STANDARD) { filters++; } } if (filters > 2) { /* more than 2 non-crypt filters */ pdfobj_flag(pdf, obj, MANY_FILTERS); } if (obj->flags & ((1 << OBJ_SIGNED) | KNOWN_FILTERS)) obj->flags &= ~(1 << OBJ_FILTER_UNKNOWN); if (obj->flags & (1 << OBJ_FILTER_UNKNOWN)) pdfobj_flag(pdf, obj, UNKNOWN_FILTER); cli_dbgmsg("cli_pdf: %u %u obj flags: %02x\n", obj->id>>8, obj->id&0xff, obj->flags); } static void pdf_parse_encrypt(struct pdf_struct *pdf, const char *enc, int len) { const char *q, *q2; uint32_t objid; if (len >= 16 && !strncmp(enc, "/EncryptMetadata", 16)) { q = cli_memstr(enc+16, len-16, "/Encrypt", 8); if (!q) return; len -= q - enc; enc = q; } q = enc + 8; len -= 8; q2 = pdf_nextobject(q, len); if (!q2 || !isdigit(*q2)) return; objid = atoi(q2) << 8; len -= q2 - q; q = q2; q2 = pdf_nextobject(q, len); if (!q2 || !isdigit(*q2)) return; objid |= atoi(q2) & 0xff; len -= q2 - q; q = q2; q2 = pdf_nextobject(q, len); if (!q2 || *q2 != 'R') return; cli_dbgmsg("cli_pdf: Encrypt dictionary in obj %d %d\n", objid>>8, objid&0xff); pdf->enc_objid = objid; } static const char *pdf_getdict(const char *q0, int* len, const char *key) { const char *q; if (*len <= 0) { cli_dbgmsg("cli_pdf: bad length %d\n", *len); return NULL; } q = cli_memstr(q0, *len, key, strlen(key)); if (!q) { cli_dbgmsg("cli_pdf: %s not found in dict\n", key); return NULL; } *len -= q - q0; q0 = q; q = pdf_nextobject(q0 + 1, *len - 1); if (!q) { cli_dbgmsg("cli_pdf: %s is invalid in dict\n", key); return NULL; } if (q[-1] == '<') q--; *len -= q - q0; return q; } static char *pdf_readstring(const char *q0, int len, const char *key, unsigned *slen) { char *s, *s0; const char *start, *q, *end; if (slen) *slen = 0; q = pdf_getdict(q0, &len, key); if (!q) return NULL; if (*q == '(') { int paren = 1; start = ++q; for (;paren > 0 && len > 0; q++,len--) { switch (*q) { case '(': paren++; break; case ')': paren--; break; case '\\': q++; len--; break; default: break; } } q--; len = q - start; s0 = s = cli_malloc(len + 1); if (!s) return NULL; end = start + len; for (q = start;q < end;q++) { if (*q != '\\') { *s++ = *q; } else { q++; switch (*q) { case 'n': *s++ = '\n'; break; case 'r': *s++ = '\r'; break; case 't': *s++ = '\t'; break; case 'b': *s++ = '\b'; break; case 'f': *s++ = '\f'; break; case '(':/* fall-through */ case ')':/* fall-through */ case '\\': *s++ = *q; break; case '\n': /* ignore */ break; case '\r': /* ignore */ if (q+1 < end && q[1] == '\n') q++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* octal escape */ if (q+2 < end) q++; *s++ = 64*(q[0] - '0')+ 8*(q[1] - '0')+ (q[2] - '0'); break; default: /* ignore */ q--; break; } } } *s++ = '\0'; if (slen) *slen = s - s0 - 1; return s0; } if (*q == '<') { start = ++q; q = memchr(q+1, '>', len); if (!q) return NULL; s = cli_malloc((q - start)/2 + 1); cli_hex2str_to(start, s, q - start); s[(q-start)/2] = '\0'; if (slen) *slen = (q - start)/2; return s; } cli_dbgmsg("cli_pdf: %s is invalid string in dict\n", key); return NULL; } static int 
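/* pdf_readint: locate 'key' inside the dictionary slice and parse the integer that follows it; returns -1 when the key is missing */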
pdf_readint(const char *q0, int len, const char *key) { const char *q = pdf_getdict(q0, &len, key); if (!q) return -1; return atoi(q); } static int pdf_readbool(const char *q0, int len, const char *key, int Default) { const char *q = pdf_getdict(q0, &len, key); if (!q || len < 5) return Default; if (!strncmp(q, "true", 4)) return 1; if (!strncmp(q, "false", 5)) return 0; cli_dbgmsg("cli_pdf: invalid value for %s bool\n", key); return Default; } static const char *key_padding = "\x28\xBF\x4E\x5E\x4E\x75\x8A\x41\x64\x00\x4e\x56\xff\xfa\x01\x08" "\x2e\x2e\x00\xB6\xD0\x68\x3E\x80\x2F\x0C\xA9\xFE\x64\x53\x69\x7A"; static void dbg_printhex(const char *msg, const char *hex, unsigned len) { if (cli_debug_flag) { char *kh = cli_str2hex(hex, len); cli_dbgmsg("cli_pdf: %s: %s\n", msg, kh); free(kh); } } static void check_user_password(struct pdf_struct *pdf, int R, const char *O, const char *U, int32_t P, int EM, unsigned length, unsigned oulen) { unsigned i; uint8_t result[16]; char data[32]; cli_md5_ctx md5; struct arc4_state arc4; unsigned password_empty = 0; dbg_printhex("U: ", U, 32); dbg_printhex("O: ", O, 32); if (R == 5) { uint8_t result2[32]; SHA256_CTX sha256; /* supplement to ISO3200, 3.5.2 Algorithm 3.11 */ sha256_init(&sha256); /* user validation salt */ sha256_update(&sha256, U+32, 8); sha256_final(&sha256, result2); dbg_printhex("Computed U", result2, 32); if (!memcmp(result2, U, 32)) { password_empty = 1; /* Algorithm 3.2a could be used to recover encryption key */ } } else if ((R >= 2) && (R <= 4)) { /* 7.6.3.3 Algorithm 2 */ cli_md5_init(&md5); /* empty password, password == padding */ cli_md5_update(&md5, key_padding, 32); cli_md5_update(&md5, O, 32); P = le32_to_host(P); cli_md5_update(&md5, &P, 4); cli_md5_update(&md5, pdf->fileID, pdf->fileIDlen); if (R >= 4 && !EM) { uint32_t v = 0xFFFFFFFF; cli_md5_update(&md5, &v, 4); } cli_md5_final(result, &md5); if (length > 128) length = 128; if (R >= 3) { for (i=0;i<50;i++) { cli_md5_init(&md5); cli_md5_update(&md5, result, length/8); cli_md5_final(result, &md5); } } if (R == 2) length = 40; pdf->keylen = length / 8; pdf->key = cli_malloc(pdf->keylen); if (!pdf->key) return; memcpy(pdf->key, result, pdf->keylen); dbg_printhex("md5", result, 16); dbg_printhex("Candidate encryption key", pdf->key, pdf->keylen); /* 7.6.3.3 Algorithm 6 */ if (R == 2) { /* 7.6.3.3 Algorithm 4 */ memcpy(data, key_padding, 32); arc4_init(&arc4, pdf->key, pdf->keylen); arc4_apply(&arc4, data, 32); dbg_printhex("computed U (R2)", data, 32); if (!memcmp(data, U, 32)) password_empty = 1; } else if (R >= 3) { unsigned len = pdf->keylen; /* 7.6.3.3 Algorithm 5 */ cli_md5_init(&md5); cli_md5_update(&md5, key_padding, 32); cli_md5_update(&md5, pdf->fileID, pdf->fileIDlen); cli_md5_final(result, &md5); memcpy(data, pdf->key, len); arc4_init(&arc4, data, len); arc4_apply(&arc4, result, 16); for (i=1;i<=19;i++) { unsigned j; for (j=0;j<len;j++) data[j] = pdf->key[j] ^ i; arc4_init(&arc4, data, len); arc4_apply(&arc4, result, 16); } dbg_printhex("fileID", pdf->fileID, pdf->fileIDlen); dbg_printhex("computed U (R>=3)", result, 16); if (!memcmp(result, U, 16)) password_empty = 1; } else { cli_dbgmsg("cli_pdf: invalid revision %d\n", R); } } else { /* Supported R is in {2,3,4,5} */ cli_dbgmsg("cli_pdf: R value out of range\n"); return; } if (password_empty) { cli_dbgmsg("cli_pdf: user password is empty\n"); /* The key we computed above is the key used to encrypt the streams. 
* We could decrypt it now if we wanted to */ pdf->flags |= 1 << DECRYPTABLE_PDF; } else { cli_dbgmsg("cli_pdf: user/owner password would be required for decryption\n"); /* the key is not valid, we would need the user or the owner password to * decrypt */ } } static void pdf_handle_enc(struct pdf_struct *pdf) { struct pdf_obj *obj; uint32_t len, required_flags, n, R, P, length, EM, i, oulen; char *O, *U; const char *q, *q2; if (pdf->enc_objid == ~0u) return; if (!pdf->fileID) { cli_dbgmsg("cli_pdf: pdf_handle_enc no file ID\n"); return; } obj = find_obj(pdf, pdf->objs, pdf->enc_objid); if (!obj) { cli_dbgmsg("cli_pdf: can't find encrypted object %d %d\n", pdf->enc_objid>>8, pdf->enc_objid&0xff); return; } len = obj_size(pdf, obj, 1); q = pdf->map + obj->start; O = U = NULL; do { EM = pdf_readbool(q, len, "/EncryptMetadata", 1); P = pdf_readint(q, len, "/P"); if (P == ~0u) { cli_dbgmsg("cli_pdf: invalid P\n"); break; } q2 = cli_memstr(q, len, "/Standard", 9); if (!q2) { cli_dbgmsg("cli_pdf: /Standard not found\n"); break; } /* we can have both of these: * /AESV2/Length /Standard/Length * /Length /Standard * make sure we don't mistake AES's length for Standard's */ length = pdf_readint(q2, len - (q2 - q), "/Length"); if (length == ~0u) length = pdf_readint(q, len, "/Length"); if (length == ~0u) length = 40; if (length < 40) { cli_dbgmsg("cli_pdf: invalid length: %d\n", length); length = 40; } R = pdf_readint(q, len, "/R"); if (R == ~0u) { cli_dbgmsg("cli_pdf: invalid R\n"); break; } if ((R > 5) || (R < 2)) { cli_dbgmsg("cli_pdf: R value outside supported range [2..5]\n"); break; } if (R < 5) oulen = 32; else oulen = 48; n = 0; O = pdf_readstring(q, len, "/O", &n); if (!O || n < oulen) { cli_dbgmsg("cli_pdf: invalid O: %d\n", n); if (O) dbg_printhex("invalid O", O, n); break; } if (n > oulen) { for (i=oulen;i<n;i++) if (O[i]) break; if (i != n) { dbg_printhex("too long O", O, n); break; } } n = 0; U = pdf_readstring(q, len, "/U", &n); if (!U || n < oulen) { cli_dbgmsg("cli_pdf: invalid U: %d\n", n); if (U) dbg_printhex("invalid U", U, n); break; } if (n > oulen) { for (i=oulen;i<n;i++) if (U[i]) break; if (i != n) { dbg_printhex("too long U", U, n); break; } } cli_dbgmsg("cli_pdf: Encrypt R: %d, P %x, length: %d\n", R, P, length); if (length % 8) { cli_dbgmsg("cli_pdf: wrong key length, not multiple of 8\n"); break; } check_user_password(pdf, R, O, U, P, EM, length, oulen); } while (0); free(O); free(U); } int cli_pdf(const char *dir, cli_ctx *ctx, off_t offset) { struct pdf_struct pdf; fmap_t *map = *ctx->fmap; size_t size = map->len - offset; off_t versize = size > 1032 ? 1032 : size; off_t map_off, bytesleft; long xref; const char *pdfver, *start, *eofmap, *q, *eof; int rc; unsigned i; cli_dbgmsg("in cli_pdf(%s)\n", dir); memset(&pdf, 0, sizeof(pdf)); pdf.ctx = ctx; pdf.dir = dir; pdf.enc_objid = ~0u; pdfver = start = fmap_need_off_once(map, offset, versize); /* Check PDF version */ if (!pdfver) { cli_errmsg("cli_pdf: mmap() failed (1)\n"); return CL_EMAP; } /* offset is 0 when coming from filetype2 */ pdfver = cli_memstr(pdfver, versize, "%PDF-", 5); if (!pdfver) { cli_dbgmsg("cli_pdf: no PDF- header found\n"); return CL_SUCCESS; } /* Check for PDF-1.[0-9]. Although 1.7 is highest now, allow for future * versions */ if (pdfver[5] != '1' || pdfver[6] != '.' 
|| pdfver[7] < '1' || pdfver[7] > '9') { pdf.flags |= 1 << BAD_PDF_VERSION; cli_dbgmsg("cli_pdf: bad pdf version: %.8s\n", pdfver); } if (pdfver != start || offset) { pdf.flags |= 1 << BAD_PDF_HEADERPOS; cli_dbgmsg("cli_pdf: PDF header is not at position 0: %ld\n",pdfver-start+offset); } offset += pdfver - start; /* find trailer and xref, don't fail if not found */ map_off = (off_t)map->len - 2048; if (map_off < 0) map_off = 0; bytesleft = map->len - map_off; eofmap = fmap_need_off_once(map, map_off, bytesleft); if (!eofmap) { cli_errmsg("cli_pdf: mmap() failed (2)\n"); return CL_EMAP; } eof = eofmap + bytesleft; for (q=&eofmap[bytesleft-5]; q > eofmap; q--) { if (memcmp(q, "%%EOF", 5) == 0) break; } if (q <= eofmap) { pdf.flags |= 1 << BAD_PDF_TRAILER; cli_dbgmsg("cli_pdf: %%%%EOF not found\n"); } else { const char *t; /*size = q - eofmap + map_off;*/ q -= 9; for (;q > eofmap;q--) { if (memcmp(q, "startxref", 9) == 0) break; } if (q <= eofmap) { pdf.flags |= 1 << BAD_PDF_TRAILER; cli_dbgmsg("cli_pdf: startxref not found\n"); } else { const char *enc; for (t=q;t > eofmap; t--) { if (memcmp(t,"trailer",7) == 0) break; } enc = cli_memstr(eofmap, bytesleft, "/Encrypt", 8); if (enc) { pdf.flags |= 1 << ENCRYPTED_PDF; cli_dbgmsg("cli_pdf: encrypted pdf found, stream will probably fail to decompress!\n"); pdf_parse_encrypt(&pdf, enc, eof - enc); pdf_parse_trailer(&pdf, eofmap, bytesleft); } q += 9; while (q < eof && (*q == ' ' || *q == '\n' || *q == '\r')) { q++; } xref = atol(q); bytesleft = map->len - offset - xref; if (bytesleft > 4096) bytesleft = 4096; q = fmap_need_off_once(map, offset + xref, bytesleft); if (!q || xrefCheck(q, q+bytesleft) == -1) { cli_dbgmsg("cli_pdf: did not find valid xref\n"); pdf.flags |= 1 << BAD_PDF_TRAILER; } } } size -= offset; pdf.size = size; pdf.map = fmap_need_off(map, offset, size); pdf.startoff = offset; if (!pdf.map) { cli_errmsg("cli_pdf: mmap() failed (3)\n"); return CL_EMAP; } rc = run_pdf_hooks(&pdf, PDF_PHASE_PRE, -1, -1); if (rc) { cli_dbgmsg("cli_pdf: returning %d\n", rc); return rc == CL_BREAK ? CL_CLEAN : rc; } /* parse PDF and find obj offsets */ while ((rc = pdf_findobj(&pdf)) > 0) { struct pdf_obj *obj = &pdf.objs[pdf.nobjs-1]; cli_dbgmsg("cli_pdf: found %d %d obj @%ld\n", obj->id >> 8, obj->id&0xff, obj->start + offset); } if (pdf.nobjs) pdf.nobjs--; if (rc == -1) pdf.flags |= 1 << BAD_PDF_TOOMANYOBJS; /* must parse after finding all objs, so we can flag indirect objects */ for (i=0;i<pdf.nobjs;i++) { struct pdf_obj *obj = &pdf.objs[i]; pdf_parseobj(&pdf, obj); } pdf_handle_enc(&pdf); if (DETECT_ENCRYPTED && (pdf.flags & (1 << ENCRYPTED_PDF)) && !(pdf.flags & (1 << DECRYPTABLE_PDF))) { /* It is encrypted, and a password/key needs to be supplied to decrypt. 
* This doesn't trigger for PDFs that are encrypted but don't need * a password to decrypt */ cli_append_virus(ctx, "Heuristics.Encrypted.PDF"); rc = CL_VIRUS; } if (!rc) rc = run_pdf_hooks(&pdf, PDF_PHASE_PARSED, -1, -1); /* extract PDF objs */ for (i=0;!rc && i<pdf.nobjs;i++) { struct pdf_obj *obj = &pdf.objs[i]; rc = pdf_extract_obj(&pdf, obj); } if (pdf.flags & (1 << ENCRYPTED_PDF)) pdf.flags &= ~ ((1 << BAD_FLATESTART) | (1 << BAD_STREAMSTART) | (1 << BAD_ASCIIDECODE)); if (pdf.flags && !rc) { cli_dbgmsg("cli_pdf: flags 0x%02x\n", pdf.flags); rc = run_pdf_hooks(&pdf, PDF_PHASE_END, -1, -1); if (!rc && (ctx->options & CL_SCAN_ALGORITHMIC)) { if (pdf.flags & (1 << ESCAPED_COMMON_PDFNAME)) { /* for example /Fl#61te#44#65#63#6f#64#65 instead of /FlateDecode */ cli_append_virus(ctx, "Heuristics.PDF.ObfuscatedNameObject"); rc = cli_found_possibly_unwanted(ctx); } } #if 0 /* TODO: find both trailers, and /Encrypt settings */ if (pdf.flags & (1 << LINEARIZED_PDF)) pdf.flags &= ~ (1 << BAD_ASCIIDECODE); if (pdf.flags & (1 << MANY_FILTERS)) pdf.flags &= ~ (1 << BAD_ASCIIDECODE); if (!rc && (pdf.flags & ((1 << BAD_PDF_TOOMANYOBJS) | (1 << BAD_STREAM_FILTERS) | (1<<BAD_FLATE) | (1<<BAD_ASCIIDECODE)| (1<<UNTERMINATED_OBJ_DICT) | (1<<UNKNOWN_FILTER)))) { rc = CL_EUNPACK; } #endif } cli_dbgmsg("cli_pdf: returning %d\n", rc); free(pdf.objs); free(pdf.fileID); free(pdf.key); /* PDF hooks may abort, don't return CL_BREAK to caller! */ return rc == CL_BREAK ? CL_CLEAN : rc; } #else static int try_flatedecode(unsigned char *buf, off_t real_len, off_t calculated_len, int fout, cli_ctx *ctx); static int flatedecode(unsigned char *buf, off_t len, int fout, cli_ctx *ctx); int cli_pdf(const char *dir, cli_ctx *ctx, off_t offset) { off_t size; /* total number of bytes in the file */ off_t bytesleft, trailerlength; char *buf; /* start of memory mapped area */ const char *p, *q, *trailerstart; const char *xrefstart; /* cross reference table */ /*size_t xreflength;*/ int printed_predictor_message, printed_embedded_font_message, rc; unsigned int files; fmap_t *map = *ctx->fmap; int opt_failed = 0; cli_dbgmsg("in cli_pdf(%s)\n", dir); size = map->len - offset; if(size <= 7) /* doesn't even include the file header */ return CL_CLEAN; p = buf = fmap_need_off_once(map, 0, size); /* FIXME: really port to fmap */ if(!buf) { cli_errmsg("cli_pdf: mmap() failed\n"); return CL_EMAP; } cli_dbgmsg("cli_pdf: scanning %lu bytes\n", (unsigned long)size); /* Lines are terminated by \r, \n or both */ /* File Header */ bytesleft = size - 5; for(q = p; bytesleft; bytesleft--, q++) { if(!strncasecmp(q, "%PDF-", 5)) { bytesleft = size - (off_t) (q - p); p = q; break; } } if(!bytesleft) { cli_dbgmsg("cli_pdf: file header not found\n"); return CL_CLEAN; } /* Find the file trailer */ for(q = &p[bytesleft - 5]; q > p; --q) if(strncasecmp(q, "%%EOF", 5) == 0) break; if(q <= p) { cli_dbgmsg("cli_pdf: trailer not found\n"); return CL_CLEAN; } for(trailerstart = &q[-7]; trailerstart > p; --trailerstart) if(memcmp(trailerstart, "trailer", 7) == 0) break; /* * q points to the end of the trailer section */ trailerlength = (long)(q - trailerstart); if(cli_memstr(trailerstart, trailerlength, "Encrypt", 7)) { /* * This tends to mean that the file is, in effect, read-only * http://www.cs.cmu.edu/~dst/Adobe/Gallery/anon21jul01-pdf-encryption.txt * http://www.adobe.com/devnet/pdf/ */ cli_dbgmsg("cli_pdf: Encrypted PDF files not yet supported\n"); return CL_CLEAN; } /* * not true, since edits may put data after the trailer bytesleft -= trailerlength; */ 
/* * FIXME: Handle more than one xref section in the xref table */ for(xrefstart = trailerstart; xrefstart > p; --xrefstart) if(memcmp(xrefstart, "xref", 4) == 0) /* * Make sure it's the start of the line, not a startxref * token */ if((xrefstart[-1] == '\n') || (xrefstart[-1] == '\r')) break; if(xrefstart == p) { cli_dbgmsg("cli_pdf: xref not found\n"); return CL_CLEAN; } printed_predictor_message = printed_embedded_font_message = 0; /* * not true, since edits may put data after the trailer xreflength = (size_t)(trailerstart - xrefstart); bytesleft -= xreflength; */ files = 0; rc = CL_CLEAN; /* * The body section consists of a sequence of indirect objects */ while((p < xrefstart) && (cli_checklimits("cli_pdf", ctx, 0, 0, 0)==CL_CLEAN) && ((q = pdf_nextobject(p, bytesleft)) != NULL)) { int is_ascii85decode, is_flatedecode, fout, len, has_cr; /*int object_number, generation_number;*/ const char *objstart, *objend, *streamstart, *streamend; unsigned long length, objlen, real_streamlen, calculated_streamlen; int is_embedded_font, predictor; char fullname[NAME_MAX + 1]; rc = CL_CLEAN; if(q == xrefstart) break; if(memcmp(q, "xref", 4) == 0) break; /*object_number = atoi(q);*/ bytesleft -= (off_t)(q - p); p = q; if(memcmp(q, "endobj", 6) == 0) continue; if(!isdigit(*q)) { cli_dbgmsg("cli_pdf: Object number missing\n"); break; } q = pdf_nextobject(p, bytesleft); if((q == NULL) || !isdigit(*q)) { cli_dbgmsg("cli_pdf: Generation number missing\n"); break; } /*generation_number = atoi(q);*/ bytesleft -= (off_t)(q - p); p = q; q = pdf_nextobject(p, bytesleft); if((q == NULL) || (memcmp(q, "obj", 3) != 0)) { cli_dbgmsg("cli_pdf: Indirect object missing \"obj\"\n"); break; } bytesleft -= (off_t)((q - p) + 3); objstart = p = &q[3]; objend = cli_memstr(p, bytesleft, "endobj", 6); if(objend == NULL) { cli_dbgmsg("cli_pdf: No matching endobj\n"); break; } bytesleft -= (off_t)((objend - p) + 6); p = &objend[6]; objlen = (unsigned long)(objend - objstart); /* Is this object a stream? */ streamstart = cli_memstr(objstart, objlen, "stream", 6); if(streamstart == NULL) continue; is_embedded_font = length = is_ascii85decode = is_flatedecode = 0; predictor = 1; /* * TODO: handle F and FFilter? 
*/ q = objstart; while(q < streamstart) { if(*q == '/') { /* name object */ /*cli_dbgmsg("Name object %8.8s\n", q+1, q+1);*/ if(strncmp(++q, "Length ", 7) == 0) { q += 7; length = atoi(q); while(isdigit(*q)) q++; /* * Note: incremental updates are not * supported */ if((bytesleft > 11) && strncmp(q, " 0 R", 4) == 0) { const char *r, *nq; char b[14]; q += 4; cli_dbgmsg("cli_pdf: Length is in indirect obj %lu\n", length); snprintf(b, sizeof(b), "%lu 0 obj", length); length = (unsigned long)strlen(b); /* optimization: assume objects * are sequential */ if(!opt_failed) { nq = q; len = buf + size - q; } else { nq = buf; len = q - buf; } do { r = cli_memstr(nq, len, b, length); if (r > nq) { const char x = *(r-1); if (x == '\n' || x=='\r') { --r; break; } } if (r) { len -= r + length - nq; nq = r + length; } else if (!opt_failed) { /* we failed optimized match, * try matching from the beginning */ len = q - buf; r = nq = buf; /* prevent * infloop */ opt_failed = 1; } } while (r); if(r) { r += length - 1; r = pdf_nextobject(r, bytesleft - (r - q)); if(r) { length = atoi(r); while(isdigit(*r)) r++; cli_dbgmsg("cli_pdf: length in '%s' %lu\n", &b[1], length); } } else cli_dbgmsg("cli_pdf: Couldn't find '%s'\n", &b[1]); } q--; } else if(strncmp(q, "Length2 ", 8) == 0) is_embedded_font = 1; else if(strncmp(q, "Predictor ", 10) == 0) { q += 10; predictor = atoi(q); while(isdigit(*q)) q++; q--; } else if(strncmp(q, "FlateDecode", 11) == 0) { is_flatedecode = 1; q += 11; } else if(strncmp(q, "ASCII85Decode", 13) == 0) { is_ascii85decode = 1; q += 13; } } q = pdf_nextobject(q, (size_t)(streamstart - q)); if(q == NULL) break; } if(is_embedded_font) { /* * Need some documentation, the only I can find a * reference to is not free, if some kind soul wishes * to donate a copy, please contact me! 
* (http://safari.adobepress.com/0321304748) */ if(!printed_embedded_font_message) { cli_dbgmsg("cli_pdf: Embedded fonts not yet supported\n"); printed_embedded_font_message = 1; } continue; } if(predictor > 1) { /* * Needs some thought */ if(!printed_predictor_message) { cli_dbgmsg("cli_pdf: Predictor %d not honoured for embedded image\n", predictor); printed_predictor_message = 1; } continue; } /* objend points to the end of the object (start of "endobj") */ streamstart += 6; /* go past the word "stream" */ len = (int)(objend - streamstart); q = pdf_nextlinestart(streamstart, len); if(q == NULL) break; len -= (int)(q - streamstart); streamstart = q; streamend = cli_memstr(streamstart, len, "endstream\n", 10); if(streamend == NULL) { streamend = cli_memstr(streamstart, len, "endstream\r", 10); if(streamend == NULL) { cli_dbgmsg("cli_pdf: No endstream\n"); break; } has_cr = 1; } else has_cr = 0; snprintf(fullname, sizeof(fullname), "%s"PATHSEP"pdf%02u", dir, files); fout = open(fullname, O_RDWR|O_CREAT|O_EXCL|O_TRUNC|O_BINARY, 0600); if(fout < 0) { char err[128]; cli_errmsg("cli_pdf: can't create temporary file %s: %s\n", fullname, cli_strerror(errno, err, sizeof(err))); rc = CL_ETMPFILE; break; } /* * Calculate the length ourself, the Length parameter is often * wrong */ if((*--streamend != '\n') && (*streamend != '\r')) streamend++; else if(has_cr && (*--streamend != '\r')) streamend++; if(streamend <= streamstart) { close(fout); cli_dbgmsg("cli_pdf: Empty stream\n"); if (cli_unlink(fullname)) { rc = CL_EUNLINK; break; } continue; } calculated_streamlen = (int)(streamend - streamstart); real_streamlen = length; cli_dbgmsg("cli_pdf: length %lu, calculated_streamlen %lu isFlate %d isASCII85 %d\n", length, calculated_streamlen, is_flatedecode, is_ascii85decode); if(calculated_streamlen != real_streamlen) { cli_dbgmsg("cli_pdf: Incorrect Length field in file attempting to recover\n"); if(real_streamlen > calculated_streamlen) real_streamlen = calculated_streamlen; } #if 0 /* FIXME: this isn't right... */ if(length) /*streamlen = (is_flatedecode) ? length : MIN(length, streamlen);*/ streamlen = MIN(length, streamlen); #endif if(is_ascii85decode) { unsigned char *tmpbuf; int ret = cli_checklimits("cli_pdf", ctx, calculated_streamlen * 5, calculated_streamlen, real_streamlen); if(ret != CL_CLEAN) { close(fout); if (cli_unlink(fullname)) { rc = CL_EUNLINK; break; } continue; } tmpbuf = cli_malloc(calculated_streamlen * 5); if(tmpbuf == NULL) { close(fout); if (cli_unlink(fullname)) { rc = CL_EUNLINK; break; } continue; } ret = ascii85decode(streamstart, calculated_streamlen, tmpbuf); if(ret == -1) { free(tmpbuf); close(fout); if (cli_unlink(fullname)) { rc = CL_EUNLINK; break; } continue; } if(ret) { unsigned char *t; unsigned size; real_streamlen = ret; /* free unused trailing bytes */ size = real_streamlen > calculated_streamlen ? real_streamlen : calculated_streamlen; t = (unsigned char *)cli_realloc(tmpbuf,size); if(t == NULL) { free(tmpbuf); close(fout); if (cli_unlink(fullname)) { rc = CL_EUNLINK; break; } continue; } tmpbuf = t; /* * Note that it will probably be both * ascii85encoded and flateencoded */ if(is_flatedecode) rc = try_flatedecode((unsigned char *)tmpbuf, real_streamlen, real_streamlen, fout, ctx); else rc = (unsigned long)cli_writen(fout, (const char *)streamstart, real_streamlen)==real_streamlen ? 
CL_CLEAN : CL_EWRITE; } free(tmpbuf); } else if(is_flatedecode) { rc = try_flatedecode((unsigned char *)streamstart, real_streamlen, calculated_streamlen, fout, ctx); } else { cli_dbgmsg("cli_pdf: writing %lu bytes from the stream\n", (unsigned long)real_streamlen); if((rc = cli_checklimits("cli_pdf", ctx, real_streamlen, 0, 0))==CL_CLEAN) rc = (unsigned long)cli_writen(fout, (const char *)streamstart, real_streamlen) == real_streamlen ? CL_CLEAN : CL_EWRITE; } if (rc == CL_CLEAN) { cli_dbgmsg("cli_pdf: extracted file %u to %s\n", files, fullname); files++; lseek(fout, 0, SEEK_SET); rc = cli_magic_scandesc(fout, ctx); } close(fout); if(!ctx->engine->keeptmp) if (cli_unlink(fullname)) rc = CL_EUNLINK; if(rc != CL_CLEAN) break; } cli_dbgmsg("cli_pdf: returning %d\n", rc); return rc; } /* * flate inflation */ static int try_flatedecode(unsigned char *buf, off_t real_len, off_t calculated_len, int fout, cli_ctx *ctx) { int ret = cli_checklimits("cli_pdf", ctx, real_len, 0, 0); if (ret==CL_CLEAN && flatedecode(buf, real_len, fout, ctx) == CL_SUCCESS) return CL_CLEAN; if(real_len == calculated_len) { /* * Nothing more we can do to inflate */ cli_dbgmsg("cli_pdf: Bad compression in flate stream\n"); return CL_CLEAN; } if(cli_checklimits("cli_pdf", ctx, calculated_len, 0, 0)!=CL_CLEAN) return CL_CLEAN; ret = flatedecode(buf, calculated_len, fout, ctx); if(ret == CL_CLEAN) return CL_CLEAN; /* i.e. the PDF file is broken :-( */ cli_dbgmsg("cli_pdf: Bad compressed block length in flate stream\n"); return ret; } static int flatedecode(unsigned char *buf, off_t len, int fout, cli_ctx *ctx) { int zstat, ret; off_t nbytes; z_stream stream; unsigned char output[BUFSIZ]; #ifdef SAVE_TMP char tmpfilename[16]; int tmpfd; #endif cli_dbgmsg("cli_pdf: flatedecode %lu bytes\n", (unsigned long)len); if(len == 0) { cli_dbgmsg("cli_pdf: flatedecode len == 0\n"); return CL_CLEAN; } #ifdef SAVE_TMP /* * Copy the embedded area for debugging, so that if it falls over * we have a copy of the offending data. This is debugging code * that you shouldn't of course install in a live environment. I am * not interested in hearing about security issues with this section * of the parser. 
*/ strcpy(tmpfilename, "/tmp/pdfXXXXXX"); tmpfd = mkstemp(tmpfilename); if(tmpfd < 0) { perror(tmpfilename); cli_errmsg("cli_pdf: Can't make debugging file\n"); } else { FILE *tmpfp = fdopen(tmpfd, "w"); if(tmpfp) { fwrite(buf, sizeof(char), len, tmpfp); fclose(tmpfp); cli_dbgmsg("cli_pdf: flatedecode: debugging file is %s\n", tmpfilename); } else cli_errmsg("cli_pdf: can't fdopen debugging file\n"); } #endif stream.zalloc = (alloc_func)Z_NULL; stream.zfree = (free_func)Z_NULL; stream.opaque = (void *)NULL; stream.next_in = (Bytef *)buf; stream.avail_in = len; stream.next_out = output; stream.avail_out = sizeof(output); zstat = inflateInit(&stream); if(zstat != Z_OK) { cli_warnmsg("cli_pdf: inflateInit failed\n"); return CL_EMEM; } nbytes = 0; while(stream.avail_in) { zstat = inflate(&stream, Z_NO_FLUSH); /* zlib */ switch(zstat) { case Z_OK: if(stream.avail_out == 0) { int written; if ((written=cli_writen(fout, output, sizeof(output)))!=sizeof(output)) { cli_errmsg("cli_pdf: failed to write output file\n"); inflateEnd(&stream); return CL_EWRITE; } nbytes += written; if((ret=cli_checklimits("cli_pdf", ctx, nbytes, 0, 0))!=CL_CLEAN) { inflateEnd(&stream); return ret; } stream.next_out = output; stream.avail_out = sizeof(output); } continue; case Z_STREAM_END: break; default: if(stream.msg) cli_dbgmsg("cli_pdf: after writing %lu bytes, got error \"%s\" inflating PDF attachment\n", (unsigned long)nbytes, stream.msg); else cli_dbgmsg("cli_pdf: after writing %lu bytes, got error %d inflating PDF attachment\n", (unsigned long)nbytes, zstat); inflateEnd(&stream); return CL_CLEAN; } break; } if(stream.avail_out != sizeof(output)) { if(cli_writen(fout, output, sizeof(output) - stream.avail_out) < 0) { cli_errmsg("cli_pdf: failed to write output file\n"); inflateEnd(&stream); return CL_EWRITE; } } #ifdef SAVE_TMP if (cli_unlink(tmpfilename)) { inflateEnd(&stream); return CL_EUNLINK; } #endif inflateEnd(&stream); return CL_CLEAN; } #endif static int asciihexdecode(const char *buf, off_t len, char *output) { unsigned i,j; for (i=0,j=0;i+1<len;i++) { if (buf[i] == ' ') continue; if (buf[i] == '>') break; if (cli_hex2str_to(buf+i, output+j, 2) == -1) { if (len - i < 4) continue; return -1; } j++; i++; } return j; } /* * ascii85 inflation, returns number of bytes in output, -1 for error * * See http://www.piclist.com/techref/method/encode.htm (look for base85) */ static int ascii85decode(const char *buf, off_t len, unsigned char *output) { const char *ptr; uint32_t sum = 0; int quintet = 0; int ret = 0; if(cli_memstr(buf, len, "~>", 2) == NULL) cli_dbgmsg("cli_pdf: ascii85decode: no EOF marker found\n"); ptr = buf; cli_dbgmsg("cli_pdf: ascii85decode %lu bytes\n", (unsigned long)len); while(len > 0) { int byte = (len--) ? (int)*ptr++ : EOF; if((byte == '~') && (len > 0) && (*ptr == '>')) byte = EOF; if(byte >= '!' 
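/* printable range '!'..'u' encodes the base-85 digits 0..84 */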
&& byte <= 'u') { sum = (sum * 85) + ((uint32_t)byte - '!'); if(++quintet == 5) { *output++ = (unsigned char)(sum >> 24); *output++ = (unsigned char)((sum >> 16) & 0xFF); *output++ = (unsigned char)((sum >> 8) & 0xFF); *output++ = (unsigned char)(sum & 0xFF); ret += 4; quintet = 0; sum = 0; } } else if(byte == 'z') { if(quintet) { cli_dbgmsg("cli_pdf: ascii85decode: unexpected 'z'\n"); return -1; } *output++ = '\0'; *output++ = '\0'; *output++ = '\0'; *output++ = '\0'; ret += 4; } else if(byte == EOF) { cli_dbgmsg("cli_pdf: ascii85decode: quintet %d\n", quintet); if(quintet) { int i; if(quintet == 1) { cli_dbgmsg("cli_pdf: ascii85Decode: only 1 byte in last quintet\n"); return -1; } for(i = quintet; i < 5; i++) sum *= 85; if(quintet > 1) sum += (0xFFFFFF >> ((quintet - 2) * 8)); ret += quintet-1; for(i = 0; i < quintet - 1; i++) *output++ = (unsigned char)((sum >> (24 - 8 * i)) & 0xFF); } break; } else if(!isspace(byte)) { cli_dbgmsg("cli_pdf: ascii85Decode: invalid character 0x%x, len %lu\n", byte & 0xFF, (unsigned long)len); return -1; } } return ret; } /* * Find the start of the next line */ static const char * pdf_nextlinestart(const char *ptr, size_t len) { while(strchr("\r\n", *ptr) == NULL) { if(--len == 0L) return NULL; ptr++; } while(strchr("\r\n", *ptr) != NULL) { if(--len == 0L) return NULL; ptr++; } return ptr; } /* * Return the start of the next PDF object. * This assumes that we're not in a stream. */ static const char * pdf_nextobject(const char *ptr, size_t len) { const char *p; int inobject = 1; while(len) { switch(*ptr) { case '\n': case '\r': case '%': /* comment */ p = pdf_nextlinestart(ptr, len); if(p == NULL) return NULL; len -= (size_t)(p - ptr); ptr = p; inobject = 0; break; case ' ': case '\t': case '[': /* Start of an array object */ case '\v': case '\f': case '<': /* Start of a dictionary object */ inobject = 0; ptr++; len--; break; case '/': /* Start of a name object */ return ptr; case '(': /* start of JS */ return ptr; default: if(!inobject) /* TODO: parse and return object type */ return ptr; ptr++; len--; } } return NULL; }
./CrossVul/dataset_final_sorted/CWE-119/c/good_5629_0
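/*
 * Editor's illustration (not part of the file above): a recurring defensive
 * pattern in this parser is clamping an attacker-controlled /Length against
 * the size of the mapped file before it is used for copies or allocations,
 * as find_length() and pdf_extract_obj() do. A minimal standalone sketch of
 * that pattern follows; the names clamp_stream_len and map_size are
 * hypothetical, not taken from the source.
 */
#include <stddef.h>

/* Clamp a declared stream length so [stream_off, stream_off + len) stays
 * inside a mapping of map_size bytes; written subtraction-first so the
 * check itself cannot overflow. */
static size_t clamp_stream_len(size_t stream_off, size_t declared_len,
                               size_t map_size)
{
    if (stream_off >= map_size)
        return 0;                     /* stream starts at or past EOF */
    if (declared_len > map_size - stream_off)
        return map_size - stream_off; /* truncate to the mapped remainder */
    return declared_len;
}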
crossvul-cpp_data_bad_671_1
/** * FreeRDP: A Remote Desktop Protocol Implementation * ZGFX (RDP8) Bulk Data Compression * * Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2017 Armin Novak <armin.novak@thincast.com> * Copyright 2017 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/bitstream.h> #include <freerdp/log.h> #include <freerdp/codec/zgfx.h> #define TAG FREERDP_TAG("codec") /** * RDP8 Compressor Limits: * * Maximum number of uncompressed bytes in a single segment: 65535 * Maximum match distance / minimum history size: 2500000 bytes. * Maximum number of segments: 65535 * Maximum expansion of a segment (when compressed size exceeds uncompressed): 1000 bytes * Minimum match length: 3 bytes */ struct _ZGFX_TOKEN { int prefixLength; int prefixCode; int valueBits; int tokenType; UINT32 valueBase; }; typedef struct _ZGFX_TOKEN ZGFX_TOKEN; static const ZGFX_TOKEN ZGFX_TOKEN_TABLE[] = { // len code vbits type vbase { 1, 0, 8, 0, 0 }, // 0 { 5, 17, 5, 1, 0 }, // 10001 { 5, 18, 7, 1, 32 }, // 10010 { 5, 19, 9, 1, 160 }, // 10011 { 5, 20, 10, 1, 672 }, // 10100 { 5, 21, 12, 1, 1696 }, // 10101 { 5, 24, 0, 0, 0x00 }, // 11000 { 5, 25, 0, 0, 0x01 }, // 11001 { 6, 44, 14, 1, 5792 }, // 101100 { 6, 45, 15, 1, 22176 }, // 101101 { 6, 52, 0, 0, 0x02 }, // 110100 { 6, 53, 0, 0, 0x03 }, // 110101 { 6, 54, 0, 0, 0xFF }, // 110110 { 7, 92, 18, 1, 54944 }, // 1011100 { 7, 93, 20, 1, 317088 }, // 1011101 { 7, 110, 0, 0, 0x04 }, // 1101110 { 7, 111, 0, 0, 0x05 }, // 1101111 { 7, 112, 0, 0, 0x06 }, // 1110000 { 7, 113, 0, 0, 0x07 }, // 1110001 { 7, 114, 0, 0, 0x08 }, // 1110010 { 7, 115, 0, 0, 0x09 }, // 1110011 { 7, 116, 0, 0, 0x0A }, // 1110100 { 7, 117, 0, 0, 0x0B }, // 1110101 { 7, 118, 0, 0, 0x3A }, // 1110110 { 7, 119, 0, 0, 0x3B }, // 1110111 { 7, 120, 0, 0, 0x3C }, // 1111000 { 7, 121, 0, 0, 0x3D }, // 1111001 { 7, 122, 0, 0, 0x3E }, // 1111010 { 7, 123, 0, 0, 0x3F }, // 1111011 { 7, 124, 0, 0, 0x40 }, // 1111100 { 7, 125, 0, 0, 0x80 }, // 1111101 { 8, 188, 20, 1, 1365664 }, // 10111100 { 8, 189, 21, 1, 2414240 }, // 10111101 { 8, 252, 0, 0, 0x0C }, // 11111100 { 8, 253, 0, 0, 0x38 }, // 11111101 { 8, 254, 0, 0, 0x39 }, // 11111110 { 8, 255, 0, 0, 0x66 }, // 11111111 { 9, 380, 22, 1, 4511392 }, // 101111100 { 9, 381, 23, 1, 8705696 }, // 101111101 { 9, 382, 24, 1, 17094304 }, // 101111110 { 0 } }; #define zgfx_GetBits(_zgfx, _nbits) \ while (_zgfx->cBitsCurrent < _nbits) { \ _zgfx->BitsCurrent <<= 8; \ if (_zgfx->pbInputCurrent < _zgfx->pbInputEnd) \ _zgfx->BitsCurrent += *(_zgfx->pbInputCurrent)++; \ _zgfx->cBitsCurrent += 8; \ } \ _zgfx->cBitsRemaining -= _nbits; \ _zgfx->cBitsCurrent -= _nbits; \ _zgfx->bits = _zgfx->BitsCurrent >> _zgfx->cBitsCurrent; \ _zgfx->BitsCurrent &= ((1 << _zgfx->cBitsCurrent) - 1); static void zgfx_history_buffer_ring_write(ZGFX_CONTEXT* zgfx, const BYTE* src, size_t count) { UINT32 front; if (count <= 0) return; if (count > 
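/* if the write is larger than the ring itself, only its newest HistoryBufferSize bytes are kept */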
zgfx->HistoryBufferSize) { const size_t residue = count - zgfx->HistoryBufferSize; count = zgfx->HistoryBufferSize; src += residue; zgfx->HistoryIndex = (zgfx->HistoryIndex + residue) % zgfx->HistoryBufferSize; } if (zgfx->HistoryIndex + count <= zgfx->HistoryBufferSize) { CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, count); if ((zgfx->HistoryIndex += count) == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; } else { front = zgfx->HistoryBufferSize - zgfx->HistoryIndex; CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, front); CopyMemory(zgfx->HistoryBuffer, &src[front], count - front); zgfx->HistoryIndex = count - front; } } static void zgfx_history_buffer_ring_read(ZGFX_CONTEXT* zgfx, int offset, BYTE* dst, UINT32 count) { UINT32 front; UINT32 index; UINT32 bytes; UINT32 valid; UINT32 bytesLeft; BYTE* dptr = dst; BYTE* origDst = dst; if (count <= 0) return; bytesLeft = count; index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) % zgfx->HistoryBufferSize; bytes = MIN(bytesLeft, offset); if ((index + bytes) <= zgfx->HistoryBufferSize) { CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), bytes); } else { front = zgfx->HistoryBufferSize - index; CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), front); CopyMemory(&dptr[front], zgfx->HistoryBuffer, bytes - front); } if ((bytesLeft -= bytes) == 0) return; dptr += bytes; valid = bytes; do { bytes = valid; if (bytes > bytesLeft) bytes = bytesLeft; CopyMemory(dptr, origDst, bytes); dptr += bytes; valid <<= 1; } while ((bytesLeft -= bytes) > 0); } static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize) { BYTE c; BYTE flags; int extra; int opIndex; int haveBits; int inPrefix; UINT32 count; UINT32 distance; BYTE* pbSegment; size_t cbSegment = segmentSize - 1; if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1)) return FALSE; Stream_Read_UINT8(stream, flags); /* header (1 byte) */ zgfx->OutputCount = 0; pbSegment = Stream_Pointer(stream); Stream_Seek(stream, cbSegment); if (!(flags & PACKET_COMPRESSED)) { zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment); CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment); zgfx->OutputCount = cbSegment; return TRUE; } zgfx->pbInputCurrent = pbSegment; zgfx->pbInputEnd = &pbSegment[cbSegment - 1]; /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */ zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; while (zgfx->cBitsRemaining) { haveBits = 0; inPrefix = 0; for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++) { while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength) { zgfx_GetBits(zgfx, 1); inPrefix = (inPrefix << 1) + zgfx->bits; haveBits++; } if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode) { if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0) { /* Literal */ zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits); zgfx->HistoryBuffer[zgfx->HistoryIndex] = c; if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize) zgfx->HistoryIndex = 0; zgfx->OutputBuffer[zgfx->OutputCount++] = c; } else { zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits); distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits; if (distance != 0) { /* Match */ zgfx_GetBits(zgfx, 1); if (zgfx->bits == 0) { count = 3; } else { count = 4; extra = 2; zgfx_GetBits(zgfx, 1); while (zgfx->bits == 1) { count *= 2; extra++; zgfx_GetBits(zgfx, 1); } zgfx_GetBits(zgfx, extra); 
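/* match length: each unary 1-bit doubles the base count and widens the literal bit field read next */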
count += zgfx->bits; } zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count); zgfx->OutputCount += count; } else { /* Unencoded */ zgfx_GetBits(zgfx, 15); count = zgfx->bits; zgfx->cBitsRemaining -= zgfx->cBitsCurrent; zgfx->cBitsCurrent = 0; zgfx->BitsCurrent = 0; CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count); zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count); zgfx->pbInputCurrent += count; zgfx->cBitsRemaining -= (8 * count); zgfx->OutputCount += count; } } break; } } } return TRUE; } int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32 flags) { int status = -1; BYTE descriptor; wStream* stream = Stream_New((BYTE*)pSrcData, SrcSize); if (!stream) return -1; if (Stream_GetRemainingLength(stream) < 1) goto fail; Stream_Read_UINT8(stream, descriptor); /* descriptor (1 byte) */ if (descriptor == ZGFX_SEGMENTED_SINGLE) { if (!zgfx_decompress_segment(zgfx, stream, Stream_GetRemainingLength(stream))) goto fail; *ppDstData = NULL; if (zgfx->OutputCount > 0) *ppDstData = (BYTE*) malloc(zgfx->OutputCount); if (!*ppDstData) goto fail; *pDstSize = zgfx->OutputCount; CopyMemory(*ppDstData, zgfx->OutputBuffer, zgfx->OutputCount); } else if (descriptor == ZGFX_SEGMENTED_MULTIPART) { UINT32 segmentSize; UINT16 segmentNumber; UINT16 segmentCount; UINT32 uncompressedSize; BYTE* pConcatenated; if (Stream_GetRemainingLength(stream) < 6) goto fail; Stream_Read_UINT16(stream, segmentCount); /* segmentCount (2 bytes) */ Stream_Read_UINT32(stream, uncompressedSize); /* uncompressedSize (4 bytes) */ if (Stream_GetRemainingLength(stream) < segmentCount * sizeof(UINT32)) goto fail; pConcatenated = (BYTE*) malloc(uncompressedSize); if (!pConcatenated) goto fail; *ppDstData = pConcatenated; *pDstSize = uncompressedSize; for (segmentNumber = 0; segmentNumber < segmentCount; segmentNumber++) { if (Stream_GetRemainingLength(stream) < sizeof(UINT32)) goto fail; Stream_Read_UINT32(stream, segmentSize); /* segmentSize (4 bytes) */ if (!zgfx_decompress_segment(zgfx, stream, segmentSize)) goto fail; CopyMemory(pConcatenated, zgfx->OutputBuffer, zgfx->OutputCount); pConcatenated += zgfx->OutputCount; } } else { goto fail; } status = 1; fail: Stream_Free(stream, FALSE); return status; } static BOOL zgfx_compress_segment(ZGFX_CONTEXT* zgfx, wStream* s, const BYTE* pSrcData, UINT32 SrcSize, UINT32* pFlags) { /* FIXME: Currently compression not implemented. Just copy the raw source */ if (!Stream_EnsureRemainingCapacity(s, SrcSize + 1)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return FALSE; } (*pFlags) |= ZGFX_PACKET_COMPR_TYPE_RDP8; /* RDP 8.0 compression format */ Stream_Write_UINT8(s, (*pFlags)); /* header (1 byte) */ Stream_Write(s, pSrcData, SrcSize); return TRUE; } int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst, const BYTE* pUncompressed, UINT32 uncompressedSize, UINT32* pFlags) { int fragment; UINT16 maxLength; UINT32 totalLength; size_t posSegmentCount = 0; const BYTE* pSrcData; int status = 0; maxLength = ZGFX_SEGMENTED_MAXSIZE; totalLength = uncompressedSize; pSrcData = pUncompressed; for (fragment = 0; (totalLength > 0) || (fragment == 0); fragment++) { UINT32 SrcSize; size_t posDstSize; size_t posDataStart; UINT32 DstSize; SrcSize = (totalLength > maxLength) ? 
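/* each fragment carries at most ZGFX_SEGMENTED_MAXSIZE uncompressed bytes */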
maxLength : totalLength; posDstSize = 0; totalLength -= SrcSize; /* Ensure we have enough space for headers */ if (!Stream_EnsureRemainingCapacity(sDst, 12)) { WLog_ERR(TAG, "Stream_EnsureRemainingCapacity failed!"); return -1; } if (fragment == 0) { /* First fragment */ /* descriptor (1 byte) */ Stream_Write_UINT8(sDst, (totalLength == 0) ? ZGFX_SEGMENTED_SINGLE : ZGFX_SEGMENTED_MULTIPART); if (totalLength > 0) { posSegmentCount = Stream_GetPosition(sDst); /* segmentCount (2 bytes) */ Stream_Seek(sDst, 2); Stream_Write_UINT32(sDst, uncompressedSize); /* uncompressedSize (4 bytes) */ } } if (fragment > 0 || totalLength > 0) { /* Multipart */ posDstSize = Stream_GetPosition(sDst); /* size (4 bytes) */ Stream_Seek(sDst, 4); } posDataStart = Stream_GetPosition(sDst); if (!zgfx_compress_segment(zgfx, sDst, pSrcData, SrcSize, pFlags)) return -1; if (posDstSize) { /* Fill segment data size */ DstSize = Stream_GetPosition(sDst) - posDataStart; Stream_SetPosition(sDst, posDstSize); Stream_Write_UINT32(sDst, DstSize); Stream_SetPosition(sDst, posDataStart + DstSize); } pSrcData += SrcSize; } Stream_SealLength(sDst); /* fill back segmentCount */ if (posSegmentCount) { Stream_SetPosition(sDst, posSegmentCount); Stream_Write_UINT16(sDst, fragment); Stream_SetPosition(sDst, Stream_Length(sDst)); } return status; } int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32* pFlags) { int status; wStream* s = Stream_New(NULL, SrcSize); status = zgfx_compress_to_stream(zgfx, s, pSrcData, SrcSize, pFlags); (*ppDstData) = Stream_Buffer(s); (*pDstSize) = Stream_GetPosition(s); Stream_Free(s, FALSE); return status; } void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush) { zgfx->HistoryIndex = 0; } ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor) { ZGFX_CONTEXT* zgfx; zgfx = (ZGFX_CONTEXT*) calloc(1, sizeof(ZGFX_CONTEXT)); if (zgfx) { zgfx->Compressor = Compressor; zgfx->HistoryBufferSize = sizeof(zgfx->HistoryBuffer); zgfx_context_reset(zgfx, FALSE); } return zgfx; } void zgfx_context_free(ZGFX_CONTEXT* zgfx) { free(zgfx); }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_671_1
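/*
 * Editor's note (illustration, not part of the file above): this is the
 * vulnerable variant of the codec. zgfx_decompress_segment() appends
 * literals, history matches and unencoded runs into the fixed-size
 * zgfx->OutputBuffer without ever comparing OutputCount against its
 * capacity, so a crafted segment overflows the buffer (the flaw publicly
 * tracked as CVE-2018-8784; the upstream fix adds a capacity check before
 * every write). A minimal sketch of that kind of guard; the function name
 * and parameters are hypothetical:
 */
#include <stddef.h>
#include <stdbool.h>

/* Return true only if 'need' more bytes fit into an output buffer of
 * 'capacity' bytes that already holds 'used' bytes. Subtraction-first so
 * the comparison cannot overflow. */
static bool zgfx_output_fits(size_t used, size_t capacity, size_t need)
{
    if (used > capacity)
        return false;
    return need <= capacity - used;
}

/* Each write site above would then abort the segment on failure, e.g.:
 *     if (!zgfx_output_fits(zgfx->OutputCount,
 *                           sizeof(zgfx->OutputBuffer), count))
 *         return FALSE;
 */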
crossvul-cpp_data_bad_2213_0
/* * LZO1X Decompressor from LZO * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * * Changed for Linux kernel use by: * Nitin Gupta <nitingupta910@gmail.com> * Richard Purdie <rpurdie@openedhand.com> */ #ifndef STATIC #include <linux/module.h> #include <linux/kernel.h> #endif #include <asm/unaligned.h> #include <linux/lzo.h> #include "lzodefs.h" #define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) #define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) #define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun #define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun #define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len) { unsigned char *op; const unsigned char *ip; size_t t, next; size_t state = 0; const unsigned char *m_pos; const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; op = out; ip = in; if (unlikely(in_len < 3)) goto input_overrun; if (*ip > 17) { t = *ip++ - 17; if (t < 4) { next = t; goto match_next; } goto copy_literal_run; } for (;;) { t = *ip++; if (t < 16) { if (likely(state == 0)) { if (unlikely(t == 0)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1); } t += 15 + *ip++; } t += 3; copy_literal_run: #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { const unsigned char *ie = ip + t; unsigned char *oe = op + t; do { COPY8(op, ip); op += 8; ip += 8; COPY8(op, ip); op += 8; ip += 8; } while (ip < ie); ip = ie; op = oe; } else #endif { NEED_OP(t); NEED_IP(t + 3); do { *op++ = *ip++; } while (--t > 0); } state = 4; continue; } else if (state != 4) { next = t & 3; m_pos = op - 1; m_pos -= t >> 2; m_pos -= *ip++ << 2; TEST_LB(m_pos); NEED_OP(2); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; goto match_next; } else { next = t & 3; m_pos = op - (1 + M2_MAX_OFFSET); m_pos -= t >> 2; m_pos -= *ip++ << 2; t = 3; } } else if (t >= 64) { next = t & 3; m_pos = op - 1; m_pos -= (t >> 2) & 7; m_pos -= *ip++ << 3; t = (t >> 5) - 1 + (3 - 1); } else if (t >= 32) { t = (t & 31) + (3 - 1); if (unlikely(t == 2)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1); } t += 31 + *ip++; NEED_IP(2); } m_pos = op - 1; next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; } else { m_pos = op; m_pos -= (t & 8) << 11; t = (t & 7) + (3 - 1); if (unlikely(t == 2)) { while (unlikely(*ip == 0)) { t += 255; ip++; NEED_IP(1); } t += 7 + *ip++; NEED_IP(2); } next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; if (m_pos == op) goto eof_found; m_pos -= 0x4000; } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (op - m_pos >= 8) { unsigned char *oe = op + t; if (likely(HAVE_OP(t + 15))) { do { COPY8(op, m_pos); op += 8; m_pos += 8; COPY8(op, m_pos); op += 8; m_pos += 8; } while (op < oe); op = oe; if (HAVE_IP(6)) { state = next; COPY4(op, ip); op += next; ip += next; continue; } } else { NEED_OP(t); do { *op++ = *m_pos++; } while (op < oe); } } else #endif { unsigned char *oe = op + t; NEED_OP(t); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; m_pos += 2; do { *op++ = *m_pos++; } while (op < oe); } match_next: state = next; t = next; #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(6) && HAVE_OP(4))) { COPY4(op, ip); op += t; ip += t; } else #endif { NEED_IP(t + 3); NEED_OP(t); while (t > 0) { *op++ = 
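/* copy the 0..3 trailing literals whose count rode along in the low bits of the previous match token */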
*ip++; t--; } } } eof_found: *out_len = op - out; return (t != 3 ? LZO_E_ERROR : ip == ip_end ? LZO_E_OK : ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); input_overrun: *out_len = op - out; return LZO_E_INPUT_OVERRUN; output_overrun: *out_len = op - out; return LZO_E_OUTPUT_OVERRUN; lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; } #ifndef STATIC EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X Decompressor"); #endif
/* * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, * Frank Blaschka <frank.blaschka@de.ibm.com> */ #define KMSG_COMPONENT "qeth" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mii.h> #include <linux/kthread.h> #include <linux/slab.h> #include <net/iucv/af_iucv.h> #include <asm/ebcdic.h> #include <asm/io.h> #include <asm/sysinfo.h> #include <asm/compat.h> #include "qeth_core.h" struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 128, 3, &debug_sprintf_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; EXPORT_SYMBOL_GPL(qeth_dbf); struct qeth_card_list_struct qeth_core_card_list; EXPORT_SYMBOL_GPL(qeth_core_card_list); struct kmem_cache *qeth_core_header_cache; EXPORT_SYMBOL_GPL(qeth_core_header_cache); static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY; static struct lock_class_key qdio_out_skb_queue_key; static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); static int qeth_issue_next_read(struct qeth_card *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_buffers(struct qeth_card *); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static struct workqueue_struct *qeth_wq; static void qeth_close_dev_handler(struct work_struct *work) { struct qeth_card *card; card = container_of(work, struct qeth_card, close_dev_work); QETH_CARD_TEXT(card, 2, "cldevhdl"); rtnl_lock(); dev_close(card->dev); rtnl_unlock(); ccwgroup_set_offline(card->gdev); } void qeth_close_dev(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "cldevsubm"); queue_work(qeth_wq, &card->close_dev_work); } EXPORT_SYMBOL_GPL(qeth_close_dev); static inline const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " Virtual NIC QDIO"; case QETH_CARD_TYPE_IQD: return " Virtual NIC Hiper"; case QETH_CARD_TYPE_OSM: return " Virtual NIC QDIO - OSM"; case QETH_CARD_TYPE_OSX: return " Virtual NIC QDIO - OSX"; default: return " unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; case QETH_CARD_TYPE_OSN: return " OSN QDIO"; case QETH_CARD_TYPE_OSM: return " OSM QDIO"; case QETH_CARD_TYPE_OSX: return " OSX QDIO"; default: return " unknown"; } } return " n/a"; } /* max length 
to be returned: 14 */ const char *qeth_get_cardname_short(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return "Virt.NIC QDIO"; case QETH_CARD_TYPE_IQD: return "Virt.NIC Hiper"; case QETH_CARD_TYPE_OSM: return "Virt.NIC OSM"; case QETH_CARD_TYPE_OSX: return "Virt.NIC OSX"; default: return "unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_FAST_ETH: return "OSD_100"; case QETH_LINK_TYPE_HSTR: return "HSTR"; case QETH_LINK_TYPE_GBIT_ETH: return "OSD_1000"; case QETH_LINK_TYPE_10GBIT_ETH: return "OSD_10GIG"; case QETH_LINK_TYPE_LANE_ETH100: return "OSD_FE_LANE"; case QETH_LINK_TYPE_LANE_TR: return "OSD_TR_LANE"; case QETH_LINK_TYPE_LANE_ETH1000: return "OSD_GbE_LANE"; case QETH_LINK_TYPE_LANE: return "OSD_ATM_LANE"; default: return "OSD_Express"; } case QETH_CARD_TYPE_IQD: return "HiperSockets"; case QETH_CARD_TYPE_OSN: return "OSN"; case QETH_CARD_TYPE_OSM: return "OSM_1000"; case QETH_CARD_TYPE_OSX: return "OSX_10GIG"; default: return "unknown"; } } return "n/a"; } void qeth_set_recovery_task(struct qeth_card *card) { card->recovery_task = current; } EXPORT_SYMBOL_GPL(qeth_set_recovery_task); void qeth_clear_recovery_task(struct qeth_card *card) { card->recovery_task = NULL; } EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); static bool qeth_is_recovery_task(const struct qeth_card *card) { return card->recovery_task == current; } void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_allowed_mask = threads; if (clear_start_mask) card->thread_start_mask &= threads; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); int qeth_threads_running(struct qeth_card *card, unsigned long threads) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); rc = (card->thread_running_mask & threads); spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { if (qeth_is_recovery_task(card)) return 0; return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } EXPORT_SYMBOL_GPL(qeth_wait_for_threads); void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; QETH_CARD_TEXT(card, 5, "clwrklst"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); } } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); static int qeth_alloc_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry; void *ptr; int i, j; QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); if (!pool_entry) { qeth_free_buffer_pool(card); return -ENOMEM; } for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { ptr = (void *) __get_free_page(GFP_KERNEL); if (!ptr) { while (j > 0) free_page((unsigned long) pool_entry->elements[--j]); kfree(pool_entry); qeth_free_buffer_pool(card); return -ENOMEM; } pool_entry->elements[j] = ptr; } list_add(&pool_entry->init_list, &card->qdio.init_pool.entry_list); } return 0; } int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { QETH_CARD_TEXT(card, 2, "realcbp"); if 
((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) return -EPERM; /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ qeth_clear_working_pool_list(card); qeth_free_buffer_pool(card); card->qdio.in_buf_pool.buf_count = bufcnt; card->qdio.init_pool.buf_count = bufcnt; return qeth_alloc_buffer_pool(card); } EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); static inline int qeth_cq_init(struct qeth_card *card) { int rc; if (card->options.cq == QETH_CQ_ENABLED) { QETH_DBF_TEXT(SETUP, 2, "cqinit"); memset(card->qdio.c_q->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); card->qdio.c_q->next_buf_to_init = 127; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, card->qdio.no_in_queues - 1, 0, 127); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); goto out; } } rc = 0; out: return rc; } static inline int qeth_alloc_cq(struct qeth_card *card) { int rc; if (card->options.cq == QETH_CQ_ENABLED) { int i; struct qdio_outbuf_state *outbuf_states; QETH_DBF_TEXT(SETUP, 2, "cqon"); card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); if (!card->qdio.c_q) { rc = -1; goto kmsg_out; } QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *)); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { card->qdio.c_q->bufs[i].buffer = &card->qdio.c_q->qdio_bufs[i]; } card->qdio.no_in_queues = 2; card->qdio.out_bufstates = kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_outbuf_state), GFP_KERNEL); outbuf_states = card->qdio.out_bufstates; if (outbuf_states == NULL) { rc = -1; goto free_cq_out; } for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i]->bufstates = outbuf_states; outbuf_states += QDIO_MAX_BUFFERS_PER_Q; } } else { QETH_DBF_TEXT(SETUP, 2, "nocq"); card->qdio.c_q = NULL; card->qdio.no_in_queues = 1; } QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues); rc = 0; out: return rc; free_cq_out: kfree(card->qdio.c_q); card->qdio.c_q = NULL; kmsg_out: dev_err(&card->gdev->dev, "Failed to create completion queue\n"); goto out; } static inline void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { --card->qdio.no_in_queues; kfree(card->qdio.c_q); card->qdio.c_q = NULL; } kfree(card->qdio.out_bufstates); card->qdio.out_bufstates = NULL; } static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, int delayed) { enum iucv_tx_notify n; switch (sbalf15) { case 0: n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; break; case 4: case 16: case 17: case 18: n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : TX_NOTIFY_UNREACHABLE; break; default: n = delayed ? 
TX_NOTIFY_DELAYED_GENERALERROR : TX_NOTIFY_GENERALERROR; break; } return n; } static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, int forced_cleanup) { if (q->card->options.cq != QETH_CQ_ENABLED) return; if (q->bufs[bidx]->next_pending != NULL) { struct qeth_qdio_out_buffer *head = q->bufs[bidx]; struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; while (c) { if (forced_cleanup || atomic_read(&c->state) == QETH_QDIO_BUF_HANDLED_DELAYED) { struct qeth_qdio_out_buffer *f = c; QETH_CARD_TEXT(f->q->card, 5, "fp"); QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); /* release here to avoid interleaving between outbound tasklet and inbound tasklet regarding notifications and lifecycle */ qeth_release_skbs(c); c = f->next_pending; WARN_ON_ONCE(head->next_pending != f); head->next_pending = c; kmem_cache_free(qeth_qdio_outbuf_cache, f); } else { head = c; c = c->next_pending; } } } if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == QETH_QDIO_BUF_HANDLED_DELAYED)) { /* for recovery situations */ q->bufs[bidx]->aob = q->bufstates[bidx].aob; qeth_init_qdio_out_buf(q, bidx); QETH_CARD_TEXT(q->card, 2, "clprecov"); } } static inline void qeth_qdio_handle_aob(struct qeth_card *card, unsigned long phys_aob_addr) { struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); buffer = (struct qeth_qdio_out_buffer *) aob->user1; QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { notification = TX_NOTIFY_OK; } else { WARN_ON_ONCE(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING); atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); notification = TX_NOTIFY_DELAYED_OK; } if (aob->aorc != 0) { QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); notification = qeth_compute_cq_notification(aob->aorc, 1); } qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; qeth_clear_output_buffer(buffer->q, buffer, QETH_QDIO_BUF_HANDLED_DELAYED); /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) { return card->options.cq == QETH_CQ_ENABLED && card->qdio.c_q != NULL && queue != 0 && queue == card->qdio.no_in_queues - 1; } static int qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 5, "issnxrd"); if (card->read.state != CH_STATE_UP) return -EIO; iob = qeth_get_buffer(&card->read); if (!iob) { dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); } return rc; } static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); if (reply) { atomic_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); reply->card = card; } return reply; } static void qeth_get_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); atomic_inc(&reply->refcnt); } static void qeth_put_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); if (atomic_dec_and_test(&reply->refcnt)) kfree(reply); } static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) { char *ipa_name; int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); if (rc) QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " "x%X \"%s\"\n", ipa_name, com, dev_name(&card->gdev->dev), QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc)); else QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", ipa_name, com, dev_name(&card->gdev->dev), QETH_CARD_IFNAME(card)); } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) { struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 5, "chkipad"); if (IS_IPA(iob->data)) { cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); if (IS_IPA_REPLY(cmd)) { if (cmd->hdr.command != IPA_CMD_SETCCID && cmd->hdr.command != IPA_CMD_DELCCID && cmd->hdr.command != IPA_CMD_MODCCID && cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); return cmd; } else { switch (cmd->hdr.command) { case IPA_CMD_STOPLAN: if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { dev_err(&card->gdev->dev, "Interface %s is down because the " "adjacent port is no longer in " "reflective relay mode\n", QETH_CARD_IFNAME(card)); qeth_close_dev(card); } else { dev_warn(&card->gdev->dev, "The link for interface %s on CHPID" " 0x%X failed\n", QETH_CARD_IFNAME(card), card->info.chpid); qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); } card->lan_online = 0; if (card->dev && netif_carrier_ok(card->dev)) netif_carrier_off(card->dev); return NULL; case IPA_CMD_STARTLAN: dev_info(&card->gdev->dev, "The link for %s on CHPID 0x%X has" " been restored\n", QETH_CARD_IFNAME(card), card->info.chpid); netif_carrier_on(card->dev); card->lan_online = 1; if (card->info.hwtrap) card->info.hwtrap = 2; qeth_schedule_recovery(card); return NULL; case IPA_CMD_MODCCID: return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: QETH_CARD_TEXT(card, 3, "irla"); break; case IPA_CMD_UNREGISTER_LOCAL_ADDR: QETH_CARD_TEXT(card, 3, "urla"); break; default: QETH_DBF_MESSAGE(2, "Received data is IPA " "but not a reply!\n"); break; } } } return cmd; } void qeth_clear_ipacmd_list(struct qeth_card *card) { struct qeth_reply *reply, *r; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { qeth_get_reply(reply); reply->rc = -EIO; atomic_inc(&reply->received); list_del_init(&reply->list); wake_up(&reply->wait_q); qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { if (!buffer) return 0; QETH_DBF_HEX(CTRL, 2, 
buffer, QETH_DBF_CTRL_LEN); if ((buffer[2] & 0xc0) == 0xc0) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " "with cause code 0x%02x%s\n", buffer[4], ((buffer[4] == 0x22) ? " -- try another portname" : "")); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); if (buffer[4] == 0xf6) { dev_err(&card->gdev->dev, "The qeth device is not configured " "for the OSI layer required by z/VM\n"); return -EPERM; } return -EIO; } return 0; } static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, __u32 len) { struct qeth_card *card; card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 4, "setupccw"); if (channel == &card->read) memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); else memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); channel->ccw.count = len; channel->ccw.cda = (__u32) __pa(iob); } static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); index = channel->io_buf_no; do { if (channel->iob[index].state == BUF_STATE_FREE) { channel->iob[index].state = BUF_STATE_LOCKED; channel->io_buf_no = (channel->io_buf_no + 1) % QETH_CMD_BUFFER_NO; memset(channel->iob[index].data, 0, QETH_BUFSIZE); return channel->iob + index; } index = (index + 1) % QETH_CMD_BUFFER_NO; } while (index != channel->io_buf_no); return NULL; } void qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) { unsigned long flags; QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); spin_lock_irqsave(&channel->iob_lock, flags); memset(iob->data, 0, QETH_BUFSIZE); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; iob->rc = 0; spin_unlock_irqrestore(&channel->iob_lock, flags); wake_up(&channel->wait_q); } EXPORT_SYMBOL_GPL(qeth_release_buffer); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer = NULL; unsigned long flags; spin_lock_irqsave(&channel->iob_lock, flags); buffer = __qeth_get_buffer(channel); spin_unlock_irqrestore(&channel->iob_lock, flags); return buffer; } struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer; wait_event(channel->wait_q, ((buffer = qeth_get_buffer(channel)) != NULL)); return buffer; } EXPORT_SYMBOL_GPL(qeth_wait_for_buffer); void qeth_clear_cmd_buffers(struct qeth_channel *channel) { int cnt; for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); channel->buf_no = 0; channel->io_buf_no = 0; } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); static void qeth_send_control_data_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) { struct qeth_card *card; struct qeth_reply *reply, *r; struct qeth_ipa_cmd *cmd; unsigned long flags; int keep_reply; int rc = 0; card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 4, "sndctlcb"); rc = qeth_check_idx_response(card, iob->data); switch (rc) { case 0: break; case -EIO: qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: goto out; } cmd = qeth_check_ipa_data(card, iob); if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) goto out; /*in case of OSN : check if cmd is set */ if (card->info.type == QETH_CARD_TYPE_OSN && cmd && cmd->hdr.command != IPA_CMD_STARTLAN && card->osn_info.assist_cb != NULL) { card->osn_info.assist_cb(card->dev, cmd); goto out; } spin_lock_irqsave(&card->lock, flags); 
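	/* Match this response to a waiting command: IDX replies match the
	 * fixed IDX sequence number, IPA replies match by hdr.seqno. */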
list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || ((cmd) && (reply->seqno == cmd->hdr.seqno))) { qeth_get_reply(reply); list_del_init(&reply->list); spin_unlock_irqrestore(&card->lock, flags); keep_reply = 0; if (reply->callback != NULL) { if (cmd) { reply->offset = (__u16)((char *)cmd - (char *)iob->data); keep_reply = reply->callback(card, reply, (unsigned long)cmd); } else keep_reply = reply->callback(card, reply, (unsigned long)iob); } if (cmd) reply->rc = (u16) cmd->hdr.return_code; else if (iob->rc) reply->rc = iob->rc; if (keep_reply) { spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); } else { atomic_inc(&reply->received); wake_up(&reply->wait_q); } qeth_put_reply(reply); goto out; } } spin_unlock_irqrestore(&card->lock, flags); out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), QETH_SEQ_NO_LENGTH); qeth_release_buffer(channel, iob); } static int qeth_setup_channel(struct qeth_channel *channel) { int cnt; QETH_DBF_TEXT(SETUP, 2, "setupch"); for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { channel->iob[cnt].data = kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); if (channel->iob[cnt].data == NULL) break; channel->iob[cnt].state = BUF_STATE_FREE; channel->iob[cnt].channel = channel; channel->iob[cnt].callback = qeth_send_control_data_cb; channel->iob[cnt].rc = 0; } if (cnt < QETH_CMD_BUFFER_NO) { while (cnt-- > 0) kfree(channel->iob[cnt].data); return -ENOMEM; } channel->buf_no = 0; channel->io_buf_no = 0; atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); init_waitqueue_head(&channel->wait_q); return 0; } static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); if (!(card->thread_allowed_mask & thread) || (card->thread_start_mask & thread)) { spin_unlock_irqrestore(&card->thread_mask_lock, flags); return -EPERM; } card->thread_start_mask |= thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); return 0; } void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_start_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit); void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); if (card->thread_start_mask & thread) { if ((card->thread_allowed_mask & thread) && !(card->thread_running_mask & thread)) { rc = 1; card->thread_start_mask &= ~thread; card->thread_running_mask |= thread; } else rc = -EPERM; } spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { int rc = 0; wait_event(card->wait_q, (rc = __qeth_do_run_thread(card, thread)) >= 0); return rc; } EXPORT_SYMBOL_GPL(qeth_do_run_thread); void qeth_schedule_recovery(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "startrec"); if 
(qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) schedule_work(&card->kernel_thread_starter); } EXPORT_SYMBOL_GPL(qeth_schedule_recovery); static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) { int dstat, cstat; char *sense; struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { QETH_CARD_TEXT(card, 2, "CGENCHK"); dev_warn(&cdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", dev_name(&cdev->dev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); return 1; } if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { QETH_CARD_TEXT(card, 2, "REVIND"); return 1; } if (sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { QETH_CARD_TEXT(card, 2, "CMDREJi"); return 1; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { QETH_CARD_TEXT(card, 2, "AFFE"); return 1; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } QETH_CARD_TEXT(card, 2, "DGENCHK"); return 1; } return 0; } static long __qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qeth_card *card; card = CARD_FROM_CDEV(cdev); if (!IS_ERR(irb)) return 0; switch (PTR_ERR(irb)) { case -EIO: QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", dev_name(&cdev->dev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); break; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { if (card && (card->data.ccwdev == cdev)) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); } } break; default: QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", dev_name(&cdev->dev), PTR_ERR(irb)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT(card, 2, " rc???"); } return PTR_ERR(irb); } static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { int rc; int cstat, dstat; struct qeth_cmd_buffer *buffer; struct qeth_channel *channel; struct qeth_card *card; struct qeth_cmd_buffer *iob; __u8 index; if (__qeth_check_irb_error(cdev, intparm, irb)) return; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) return; QETH_CARD_TEXT(card, 5, "irq"); if (card->read.ccwdev == cdev) { channel = &card->read; QETH_CARD_TEXT(card, 5, "read"); } else if (card->write.ccwdev == cdev) { channel = &card->write; QETH_CARD_TEXT(card, 5, "write"); } else { channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } atomic_set(&channel->irq_pending, 0); if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) channel->state = CH_STATE_STOPPED; if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) channel->state = CH_STATE_HALTED; /*let's wake up immediately on data channel*/ if ((channel == &card->data) && (intparm != 0) && (intparm != QETH_RCD_PARM)) goto out; if (intparm == QETH_CLEAR_CHANNEL_PARM) { QETH_CARD_TEXT(card, 6, "clrchpar"); /* we don't have to handle this further */ intparm = 0; } if (intparm == QETH_HALT_CHANNEL_PARM) { QETH_CARD_TEXT(card, 6, "hltchpar"); /* we 
don't have to handle this further */ intparm = 0; } if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { if (irb->esw.esw0.erw.cons) { dev_warn(&channel->ccwdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); QETH_DBF_MESSAGE(2, "%s sense data available. cstat " "0x%X dstat 0x%X\n", dev_name(&channel->ccwdev->dev), cstat, dstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); print_hex_dump(KERN_WARNING, "qeth: sense data ", DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); } if (intparm == QETH_RCD_PARM) { channel->state = CH_STATE_DOWN; goto out; } rc = qeth_get_problem(cdev, irb); if (rc) { qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; } } if (intparm == QETH_RCD_PARM) { channel->state = CH_STATE_RCD_DONE; goto out; } if (intparm) { buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); buffer->state = BUF_STATE_PROCESSED; } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; while (iob[index].state == BUF_STATE_PROCESSED) { if (iob[index].callback != NULL) iob[index].callback(channel, iob + index); index = (index + 1) % QETH_CMD_BUFFER_NO; } channel->buf_no = index; out: wake_up(&card->wait_q); return; } static void qeth_notify_skbs(struct qeth_qdio_out_q *q, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification) { struct sk_buff *skb; if (skb_queue_empty(&buf->skb_list)) goto out; skb = skb_peek(&buf->skb_list); while (skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); if (skb->protocol == ETH_P_AF_IUCV) { if (skb->sk) { struct iucv_sock *iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, notification); } } if (skb_queue_is_last(&buf->skb_list, skb)) skb = NULL; else skb = skb_queue_next(&buf->skb_list, skb); } out: return; } static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { struct sk_buff *skb; struct iucv_sock *iucv; int notify_general_error = 0; if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) notify_general_error = 1; /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); skb = skb_dequeue(&buf->skb_list); while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { if (skb->sk) { iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); } } atomic_dec(&skb->users); dev_kfree_skb_any(skb); skb = skb_dequeue(&buf->skb_list); } } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum qeth_qdio_buffer_states newbufstate) { int i; /* is PCI flag set on buffer? 
*/ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); if (newbufstate == QETH_QDIO_BUF_EMPTY) { qeth_release_skbs(buf); } for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; buf->buffer->element[i].length = 0; buf->buffer->element[i].addr = NULL; buf->buffer->element[i].eflags = 0; buf->buffer->element[i].sflags = 0; } buf->buffer->element[15].eflags = 0; buf->buffer->element[15].sflags = 0; buf->next_element_to_fill = 0; atomic_set(&buf->state, newbufstate); } static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) { int j; for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (!q->bufs[j]) continue; qeth_cleanup_handled_pending(q, j, 1); qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); if (free) { kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q->bufs[j] = NULL; } } } void qeth_clear_qdio_buffers(struct qeth_card *card) { int i; QETH_CARD_TEXT(card, 2, "clearqdbf"); /* clear outbound buffers to free skbs */ for (i = 0; i < card->qdio.no_out_queues; ++i) { if (card->qdio.out_qs[i]) { qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); } } } EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); static void qeth_free_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; int i = 0; list_for_each_entry_safe(pool_entry, tmp, &card->qdio.init_pool.entry_list, init_list){ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) free_page((unsigned long)pool_entry->elements[i]); list_del(&pool_entry->init_list); kfree(pool_entry); } } static void qeth_free_qdio_buffers(struct qeth_card *card) { int i, j; if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; qeth_free_cq(card); cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); } kfree(card->qdio.in_q); card->qdio.in_q = NULL; /* inbound buffer pool */ qeth_free_buffer_pool(card); /* free outbound qdio_qs */ if (card->qdio.out_qs) { for (i = 0; i < card->qdio.no_out_queues; ++i) { qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); kfree(card->qdio.out_qs[i]); } kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; } } static void qeth_clean_channel(struct qeth_channel *channel) { int cnt; QETH_DBF_TEXT(SETUP, 2, "freech"); for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); } static void qeth_set_single_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && (card->qdio.no_out_queues == 4)) qeth_free_qdio_buffers(card); card->qdio.no_out_queues = 1; if (card->qdio.default_out_queue != 0) dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); card->qdio.default_out_queue = 0; } static void qeth_set_multiple_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && (card->qdio.no_out_queues == 1)) { qeth_free_qdio_buffers(card); card->qdio.default_out_queue = 2; } card->qdio.no_out_queues = 4; } static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channelPath_dsc { u8 flags; u8 lsn; u8 desc; u8 chpid; u8 swla; u8 zeroes; u8 chla; u8 chpp; } *chp_dsc; QETH_DBF_TEXT(SETUP, 2, "chp_desc"); ccwdev = card->data.ccwdev; chp_dsc = 
ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) goto out; card->info.func_level = 0x4100 + chp_dsc->desc; if (card->info.type == QETH_CARD_TYPE_IQD) goto out; /* CHPP field bit 6 == 1 -> single queue */ if ((chp_dsc->chpp & 0x02) == 0x02) qeth_set_single_write_queues(card); else qeth_set_multiple_write_queues(card); out: kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); } static void qeth_init_qdio_info(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 4, "intqdinf"); atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); /* inbound */ card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; if (card->info.type == QETH_CARD_TYPE_IQD) card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; else card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); } static void qeth_set_intial_options(struct qeth_card *card) { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; card->options.fake_broadcast = 0; card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; card->options.cq = QETH_CQ_DISABLED; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", (u8) card->thread_start_mask, (u8) card->thread_allowed_mask, (u8) card->thread_running_mask); rc = (card->thread_start_mask & thread); spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } static void qeth_start_kernel_thread(struct work_struct *work) { struct task_struct *ts; struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); QETH_CARD_TEXT(card , 2, "strthrd"); if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { ts = kthread_run(card->discipline->recover, (void *)card, "qeth_recover"); if (IS_ERR(ts)) { qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); } } } static int qeth_setup_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "setupcrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); card->read.state = CH_STATE_DOWN; card->write.state = CH_STATE_DOWN; card->data.state = CH_STATE_DOWN; card->state = CARD_STATE_DOWN; card->lan_online = 0; card->read_or_write_problem = 0; card->dev = NULL; spin_lock_init(&card->vlanlock); spin_lock_init(&card->mclock); spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); INIT_LIST_HEAD(&card->ip_list); INIT_LIST_HEAD(card->ip_tbd_list); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); /* initial options */ qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); card->ipato.enabled = 0; card->ipato.invert4 = 0; card->ipato.invert6 = 0; /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, 
qeth_buffer_reclaim_work); INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); return 0; } static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) { struct qeth_card *card = container_of(slr, struct qeth_card, qeth_service_level); if (card->info.mcl_level[0]) seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), card->info.mcl_level); } static struct qeth_card *qeth_alloc_card(void) { struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "alloccrd"); card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); if (!card->ip_tbd_list) { QETH_DBF_TEXT(SETUP, 0, "iptbdnom"); goto out_card; } if (qeth_setup_channel(&card->read)) goto out_ip; if (qeth_setup_channel(&card->write)) goto out_channel; card->options.layer2 = -1; card->qeth_service_level.seq_print = qeth_core_sl_print; register_service_level(&card->qeth_service_level); return card; out_channel: qeth_clean_channel(&card->read); out_ip: kfree(card->ip_tbd_list); out_card: kfree(card); out: return NULL; } static int qeth_determine_card_type(struct qeth_card *card) { int i = 0; QETH_DBF_TEXT(SETUP, 2, "detcdtyp"); card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; while (known_devices[i][QETH_DEV_MODEL_IND]) { if ((CARD_RDEV(card)->id.dev_type == known_devices[i][QETH_DEV_TYPE_IND]) && (CARD_RDEV(card)->id.dev_model == known_devices[i][QETH_DEV_MODEL_IND])) { card->info.type = known_devices[i][QETH_DEV_MODEL_IND]; card->qdio.no_out_queues = known_devices[i][QETH_QUEUE_NO_IND]; card->qdio.no_in_queues = 1; card->info.is_multicast_different = known_devices[i][QETH_MULTICAST_IND]; qeth_update_from_chp_desc(card); return 0; } i++; } card->info.type = QETH_CARD_TYPE_UNKNOWN; dev_err(&card->gdev->dev, "The adapter hardware is of an " "unknown type\n"); return -ENOENT; } static int qeth_clear_channel(struct qeth_channel *channel) { unsigned long flags; struct qeth_card *card; int rc; card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "clearch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) return rc; rc = wait_event_interruptible_timeout(card->wait_q, channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); if (rc == -ERESTARTSYS) return rc; if (channel->state != CH_STATE_STOPPED) return -ETIME; channel->state = CH_STATE_DOWN; return 0; } static int qeth_halt_channel(struct qeth_channel *channel) { unsigned long flags; struct qeth_card *card; int rc; card = CARD_FROM_CDEV(channel->ccwdev); QETH_CARD_TEXT(card, 3, "haltch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) return rc; rc = wait_event_interruptible_timeout(card->wait_q, channel->state == CH_STATE_HALTED, QETH_TIMEOUT); if (rc == -ERESTARTSYS) return rc; if (channel->state != CH_STATE_HALTED) return -ETIME; return 0; } static int qeth_halt_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "haltchs"); rc1 = qeth_halt_channel(&card->read); rc2 = qeth_halt_channel(&card->write); rc3 = qeth_halt_channel(&card->data); if (rc1) return rc1; if (rc2) return rc2; return rc3; } static int qeth_clear_channels(struct qeth_card *card) { 
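	/* Issue a CCW clear on all three channels (read, write, data) and
	 * report the first error encountered. */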
int rc1 = 0, rc2 = 0, rc3 = 0; QETH_CARD_TEXT(card, 3, "clearchs"); rc1 = qeth_clear_channel(&card->read); rc2 = qeth_clear_channel(&card->write); rc3 = qeth_clear_channel(&card->data); if (rc1) return rc1; if (rc2) return rc2; return rc3; } static int qeth_clear_halt_card(struct qeth_card *card, int halt) { int rc = 0; QETH_CARD_TEXT(card, 3, "clhacrd"); if (halt) rc = qeth_halt_channels(card); if (rc) return rc; return qeth_clear_channels(card); } int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) { int rc = 0; QETH_CARD_TEXT(card, 3, "qdioclr"); switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, QETH_QDIO_CLEANING)) { case QETH_QDIO_ESTABLISHED: if (card->info.type == QETH_CARD_TYPE_IQD) rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_HALT); else rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_CLEAR); if (rc) QETH_CARD_TEXT_(card, 3, "1err%d", rc); qdio_free(CARD_DDEV(card)); atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); break; case QETH_QDIO_CLEANING: return rc; default: break; } rc = qeth_clear_halt_card(card, use_halt); if (rc) QETH_CARD_TEXT_(card, 3, "2err%d", rc); card->state = CARD_STATE_DOWN; return rc; } EXPORT_SYMBOL_GPL(qeth_qdio_clear_card); static int qeth_read_conf_data(struct qeth_card *card, void **buffer, int *length) { struct ciw *ciw; char *rcd_buf; int ret; struct qeth_channel *channel = &card->data; unsigned long flags; /* * scan for RCD command in extended SenseID data */ ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); if (!rcd_buf) return -ENOMEM; channel->ccw.cmd_code = ciw->cmd; channel->ccw.cda = (__u32) __pa(rcd_buf); channel->ccw.count = ciw->count; channel->ccw.flags = CCW_FLAG_SLI; channel->state = CH_STATE_RCD; spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, QETH_RCD_PARM, LPM_ANYPATH, 0, QETH_RCD_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (!ret) wait_event(card->wait_q, (channel->state == CH_STATE_RCD_DONE || channel->state == CH_STATE_DOWN)); if (channel->state == CH_STATE_DOWN) ret = -EIO; else channel->state = CH_STATE_DOWN; if (ret) { kfree(rcd_buf); *buffer = NULL; *length = 0; } else { *length = ciw->count; *buffer = rcd_buf; } return ret; } static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) { QETH_DBF_TEXT(SETUP, 2, "cfgunit"); card->info.chpid = prcd[30]; card->info.unit_addr2 = prcd[31]; card->info.cula = prcd[63]; card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && (prcd[0x11] == _ascebc['M'])); } static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) { QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] >= 0xF1 && prcd[76] <= 0xF4) { card->info.blkt.time_total = 0; card->info.blkt.inter_packet = 0; card->info.blkt.inter_packet_jumbo = 0; } else { card->info.blkt.time_total = 250; card->info.blkt.inter_packet = 5; card->info.blkt.inter_packet_jumbo = 15; } } static void qeth_init_tokens(struct qeth_card *card) { card->token.issuer_rm_w = 0x00010103UL; card->token.cm_filter_w = 0x00010108UL; card->token.cm_connection_w = 0x0001010aUL; card->token.ulp_filter_w = 0x0001010bUL; card->token.ulp_connection_w = 0x0001010dUL; } static void qeth_init_func_level(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_IQD: card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; break; case 
QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSN: card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; break; default: break; } } static int qeth_idx_activate_get_answer(struct qeth_channel *channel, void (*idx_reply_cb)(struct qeth_channel *, struct qeth_cmd_buffer *)) { struct qeth_cmd_buffer *iob; unsigned long flags; int rc; struct qeth_card *card; QETH_DBF_TEXT(SETUP, 2, "idxanswr"); card = CARD_FROM_CDEV(channel->ccwdev); iob = qeth_get_buffer(channel); iob->callback = idx_reply_cb; memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); channel->ccw.count = QETH_BUFSIZE; channel->ccw.cda = (__u32) __pa(iob->data); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); return rc; } rc = wait_event_interruptible_timeout(card->wait_q, channel->state == CH_STATE_UP, QETH_TIMEOUT); if (rc == -ERESTARTSYS) return rc; if (channel->state != CH_STATE_UP) { rc = -ETIME; QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); qeth_clear_cmd_buffers(channel); } else rc = 0; return rc; } static int qeth_idx_activate_channel(struct qeth_channel *channel, void (*idx_reply_cb)(struct qeth_channel *, struct qeth_cmd_buffer *)) { struct qeth_card *card; struct qeth_cmd_buffer *iob; unsigned long flags; __u16 temp; __u8 tmp; int rc; struct ccw_dev_id temp_devid; card = CARD_FROM_CDEV(channel->ccwdev); QETH_DBF_TEXT(SETUP, 2, "idxactch"); iob = qeth_get_buffer(channel); iob->callback = idx_reply_cb; memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); channel->ccw.count = IDX_ACTIVATE_SIZE; channel->ccw.cda = (__u32) __pa(iob->data); if (channel == &card->write) { memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); card->seqno.trans_hdr++; } else { memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); } tmp = ((__u8)card->info.portno) | 0x80; memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1); memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), &card->info.func_level, sizeof(__u16)); ccw_device_get_id(CARD_DDEV(card), &temp_devid); memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); temp = (card->info.cula << 8) + card->info.unit_addr2; memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); wait_event(card->wait_q, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_start(channel->ccwdev, &channel->ccw, (addr_t) iob, 0, 0); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "Error1 in activating channel. 
rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); return rc; } rc = wait_event_interruptible_timeout(card->wait_q, channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT); if (rc == -ERESTARTSYS) return rc; if (channel->state != CH_STATE_ACTIVATING) { dev_warn(&channel->ccwdev->dev, "The qeth device driver" " failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); qeth_clear_cmd_buffers(channel); return -ETIME; } return qeth_idx_activate_get_answer(channel, idx_reply_cb); } static int qeth_peer_func_level(int level) { if ((level & 0xff) == 8) return (level & 0xff) + 0x400; if (((level >> 8) & 3) == 1) return (level & 0xff) + 0x200; return level; } static void qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) { struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); if (channel->state == CH_STATE_DOWN) { channel->state = CH_STATE_ACTIVATING; goto out; } card = CARD_FROM_CDEV(channel->ccwdev); if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) dev_err(&card->write.ccwdev->dev, "The adapter is used exclusively by another " "host\n"); else QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" " negative reply\n", dev_name(&card->write.ccwdev->dev)); goto out; } memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " "function level mismatch (sent: 0x%x, received: " "0x%x)\n", dev_name(&card->write.ccwdev->dev), card->info.func_level, temp); goto out; } channel->state = CH_STATE_UP; out: qeth_release_buffer(channel, iob); } static void qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) { struct qeth_card *card; __u16 temp; QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); if (channel->state == CH_STATE_DOWN) { channel->state = CH_STATE_ACTIVATING; goto out; } card = CARD_FROM_CDEV(channel->ccwdev); if (qeth_check_idx_response(card, iob->data)) goto out; if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { case QETH_IDX_ACT_ERR_EXCL: dev_err(&card->write.ccwdev->dev, "The adapter is used exclusively by another " "host\n"); break; case QETH_IDX_ACT_ERR_AUTH: case QETH_IDX_ACT_ERR_AUTH_USER: dev_err(&card->read.ccwdev->dev, "Setting the device online failed because of " "insufficient authorization\n"); break; default: QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" " negative reply\n", dev_name(&card->read.ccwdev->dev)); } QETH_CARD_TEXT_(card, 2, "idxread%c", QETH_IDX_ACT_CAUSE_CODE(iob->data)); goto out; } /** * * temporary fix for microcode bug * * to revert it,replace OR by AND * */ if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) || (card->info.type == QETH_CARD_TYPE_OSD)) card->info.portname_required = 1; memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); if (temp != qeth_peer_func_level(card->info.func_level)) { QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " "level mismatch (sent: 0x%x, received: 0x%x)\n", dev_name(&card->read.ccwdev->dev), card->info.func_level, temp); goto out; } memcpy(&card->token.issuer_rm_r, QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); memcpy(&card->info.mcl_level[0], QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); channel->state = CH_STATE_UP; out: 
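	/* every exit path returns the command buffer to the channel pool */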
qeth_release_buffer(channel, iob); } void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { qeth_setup_ccw(&card->write, iob->data, len); iob->callback = qeth_release_buffer; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); card->seqno.trans_hdr++; memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); int qeth_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *reply_param) { int rc; unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 2, "sendctl"); if (card->read_or_write_problem) { qeth_release_buffer(iob->channel, iob); return -EIO; } reply = qeth_alloc_reply(card); if (!reply) { return -ENOMEM; } reply->callback = reply_cb; reply->param = reply_param; if (card->state == CARD_STATE_DOWN) reply->seqno = QETH_IDX_COMMAND_SEQNO; else reply->seqno = card->seqno.ipa++; init_waitqueue_head(&reply->wait_q); spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; qeth_prepare_control_data(card, len, iob); if (IS_IPA(iob->data)) event_timeout = QETH_IPA_TIMEOUT; else event_timeout = QETH_TIMEOUT; timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", dev_name(&card->write.ccwdev->dev), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); list_del_init(&reply->list); qeth_put_reply(reply); spin_unlock_irqrestore(&card->lock, flags); qeth_release_buffer(iob->channel, iob); atomic_set(&card->write.irq_pending, 0); wake_up(&card->wait_q); return rc; } /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if ((cmd->hdr.command == IPA_CMD_SETIP) && (cmd->hdr.prot_version == QETH_PROT_IPV4)) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; } else { while (!atomic_read(&reply->received)) { if (time_after(jiffies, timeout)) goto time_err; cpu_relax(); } } if (reply->rc == -EIO) goto error; rc = reply->rc; qeth_put_reply(reply); return rc; time_err: reply->rc = -ETIME; spin_lock_irqsave(&reply->card->lock, flags); list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); atomic_inc(&reply->received); error: atomic_set(&card->write.irq_pending, 0); qeth_release_buffer(iob->channel, iob); card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; } EXPORT_SYMBOL_GPL(qeth_send_control_data); static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer 
*iob; QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.cm_filter_r, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_cm_enable(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmenable"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, qeth_cm_enable_cb, NULL); return rc; } static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.cm_connection_r, QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), QETH_MPC_TOKEN_LENGTH); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_cm_setup(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetup"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, qeth_cm_setup_cb, NULL); return rc; } static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_UNKNOWN: return 1500; case QETH_CARD_TYPE_IQD: return card->info.max_mtu; case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_HSTR: case QETH_LINK_TYPE_LANE_TR: return 2000; default: return card->options.layer2 ? 1500 : 1492; } case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: return card->options.layer2 ? 
1500 : 1492; default: return 1500; } } static inline int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { case 0x4000: return 8192; case 0x6000: return 16384; case 0xa000: return 32768; case 0xffff: return 57344; default: return 0; } } static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: case QETH_CARD_TYPE_IQD: return ((mtu >= 576) && (mtu <= card->info.max_mtu)); case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_UNKNOWN: default: return 1; } } static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { __u16 mtu, framesize; __u16 len; __u8 link_type; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); if (card->info.type == QETH_CARD_TYPE_IQD) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); if (!mtu) { iob->rc = -EINVAL; QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) { /* frame size has changed */ if (card->dev && ((card->dev->mtu == card->info.initial_mtu) || (card->dev->mtu > mtu))) card->dev->mtu = mtu; qeth_free_qdio_buffers(card); } card->info.initial_mtu = mtu; card->info.max_mtu = mtu; card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; } else { card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU( iob->data); card->info.initial_mtu = min(card->info.max_mtu, qeth_get_initial_mtu_for_card(card)); card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; } memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { memcpy(&link_type, QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); card->info.link_type = link_type; } else card->info.link_type = 0; QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_ulp_enable(struct qeth_card *card) { int rc; char prot_type; struct qeth_cmd_buffer *iob; /*FIXME: trace view callbacks*/ QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (__u8) card->info.portno; if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) prot_type = QETH_PROT_OSN2; else prot_type = QETH_PROT_LAYER2; else prot_type = QETH_PROT_TCPIP; memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data), card->info.portname, 9); rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, qeth_ulp_enable_cb, NULL); return rc; } static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.ulp_connection_r, QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 3)) { QETH_DBF_TEXT(SETUP, 2, "olmlimit"); dev_err(&card->gdev->dev, "A connection could not be " "established because of an OLM 
limit\n"); iob->rc = -EMLINK; }
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; }
static int qeth_ulp_setup(struct qeth_card *card) { int rc; __u16 temp; struct qeth_cmd_buffer *iob; struct ccw_dev_id dev_id;
QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
iob = qeth_wait_for_buffer(&card->write);
memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
ccw_device_get_id(CARD_DDEV(card), &dev_id);
memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, qeth_ulp_setup_cb, NULL); return rc; }
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) { int rc; struct qeth_qdio_out_buffer *newbuf; rc = 0;
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
if (!newbuf) { rc = -ENOMEM; goto out; }
newbuf->buffer = &q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q; newbuf->aob = NULL; newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
if (q->bufstates) { q->bufstates[bidx].user = newbuf;
QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf->next_pending); }
out: return rc; }
static int qeth_alloc_qdio_buffers(struct qeth_card *card) { int i, j;
QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0;
card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
if (!card->qdio.in_q) goto out_nomem;
QETH_DBF_TEXT(SETUP, 2, "inq");
QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
/* give inbound qeth_qdio_buffers their qdio_buffers */
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { card->qdio.in_q->bufs[i].buffer = &card->qdio.in_q->qdio_bufs[i]; card->qdio.in_q->bufs[i].rx_skb = NULL; }
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card)) goto out_freeinq;
/* outbound */
card->qdio.out_qs = kzalloc(card->qdio.no_out_queues * sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
if (!card->qdio.out_qs) goto out_freepool;
for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q), GFP_KERNEL);
if (!card->qdio.out_qs[i]) goto out_freeoutq;
QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) goto out_freeoutqbufs; } }
/* completion */
if (qeth_alloc_cq(card)) goto out_freeoutq;
return 0;
out_freeoutqbufs:
while (j > 0) { --j;
kmem_cache_free(qeth_qdio_outbuf_cache, card->qdio.out_qs[i]->bufs[j]);
card->qdio.out_qs[i]->bufs[j] = NULL; }
out_freeoutq:
/* release each queue's buffers before freeing the queue itself, so we
 * never dereference already-freed memory */
while (i > 0) { qeth_clear_outq_buffers(card->qdio.out_qs[--i], 1); kfree(card->qdio.out_qs[i]); }
kfree(card->qdio.out_qs);
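/* clear the dangling pointer so a repeated cleanup cannot free the array twice */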
card->qdio.out_qs = NULL; out_freepool: qeth_free_buffer_pool(card); out_freeinq: kfree(card->qdio.in_q); card->qdio.in_q = NULL; out_nomem: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); return -ENOMEM; } static void qeth_create_qib_param_field(struct qeth_card *card, char *param_field) { param_field[0] = _ascebc['P']; param_field[1] = _ascebc['C']; param_field[2] = _ascebc['I']; param_field[3] = _ascebc['T']; *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card); *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card); *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card); } static void qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field) { param_field[16] = _ascebc['B']; param_field[17] = _ascebc['L']; param_field[18] = _ascebc['K']; param_field[19] = _ascebc['T']; *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total; *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet; *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo; } static int qeth_qdio_activate(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 3, "qdioact"); return qdio_activate(CARD_DDEV(card)); } static int qeth_dm_act(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "dmact"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, DM_ACT, DM_ACT_SIZE); memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); return rc; } static int qeth_mpc_initialize(struct qeth_card *card) { int rc; QETH_DBF_TEXT(SETUP, 2, "mpcinit"); rc = qeth_issue_next_read(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } rc = qeth_cm_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto out_qdio; } rc = qeth_cm_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out_qdio; } rc = qeth_ulp_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); goto out_qdio; } rc = qeth_ulp_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_alloc_qdio_buffers(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_qdio_establish(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); qeth_free_qdio_buffers(card); goto out_qdio; } rc = qeth_qdio_activate(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out_qdio; } rc = qeth_dm_act(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out_qdio; } return 0; out_qdio: qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); return rc; } static void qeth_print_status_with_portname(struct qeth_card *card) { char dbf_text[15]; int i; sprintf(dbf_text, "%s", card->info.portname + 1); for (i = 0; i < 8; i++) dbf_text[i] = (char) _ebcasc[(__u8) dbf_text[i]]; dbf_text[8] = 0; dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" "with link type %s (portname: %s)\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? 
")" : "", qeth_get_cardname_short(card), dbf_text); } static void qeth_print_status_no_portname(struct qeth_card *card) { if (card->info.portname[0]) dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s " "(no portname needed by interface).\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card)); else dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s.\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card)); } void qeth_print_status_message(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: /* VM will use a non-zero first character * to indicate a HiperSockets like reporting * of the level OSA sets the first character to zero * */ if (!card->info.mcl_level[0]) { sprintf(card->info.mcl_level, "%02x%02x", card->info.mcl_level[2], card->info.mcl_level[3]); card->info.mcl_level[QETH_MCL_LENGTH] = 0; break; } /* fallthrough */ case QETH_CARD_TYPE_IQD: if ((card->info.guestlan) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) card->info.mcl_level[0]]; card->info.mcl_level[1] = (char) _ebcasc[(__u8) card->info.mcl_level[1]]; card->info.mcl_level[2] = (char) _ebcasc[(__u8) card->info.mcl_level[2]]; card->info.mcl_level[3] = (char) _ebcasc[(__u8) card->info.mcl_level[3]]; card->info.mcl_level[QETH_MCL_LENGTH] = 0; } break; default: memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); } if (card->info.portname_required) qeth_print_status_with_portname(card); else qeth_print_status_no_portname(card); } EXPORT_SYMBOL_GPL(qeth_print_status_message); static void qeth_initialize_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; QETH_CARD_TEXT(card, 5, "inwrklst"); list_for_each_entry(entry, &card->qdio.init_pool.entry_list, init_list) { qeth_put_buffer_pool_entry(card, entry); } } static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( struct qeth_card *card) { struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; struct page *page; if (list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { if (page_count(virt_to_page(entry->elements[i])) > 1) { free = 0; break; } } if (free) { list_del_init(&entry->list); return entry; } } /* no free buffer in pool so take first one and swap pages */ entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { if (page_count(virt_to_page(entry->elements[i])) > 1) { page = alloc_page(GFP_ATOMIC); if (!page) { return NULL; } else { free_page((unsigned long)entry->elements[i]); entry->elements[i] = page_address(page); if (card->options.performance_stats) card->perf_stats.sg_alloc_page_rx++; } } } list_del_init(&entry->list); return entry; } static int qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { struct qeth_buffer_pool_entry *pool_entry; int i; if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { buf->rx_skb = 
dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
if (!buf->rx_skb) return 1; }
pool_entry = qeth_find_free_buffer_pool_entry(card);
if (!pool_entry) return 1;
/*
 * since the buffer is accessed only from the input_tasklet
 * there shouldn't be a need to synchronize; also, since we use
 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
 * buffers
 */
buf->pool_entry = pool_entry;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
buf->buffer->element[i].addr = pool_entry->elements[i];
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else buf->buffer->element[i].eflags = 0;
buf->buffer->element[i].sflags = 0; }
return 0; }
int qeth_init_qdio_queues(struct qeth_card *card) { int i, j; int rc;
QETH_DBF_TEXT(SETUP, 2, "initqdqs");
/* inbound queue */
memset(card->qdio.in_q->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
qeth_initialize_working_pool_list(card);
/* give only as many buffers to hardware as we have buffer pool entries */
for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, card->qdio.in_buf_pool.buf_count - 1);
if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; }
/* completion */
rc = qeth_cq_init(card);
if (rc) { return rc; }
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_clear_output_buffer(card->qdio.out_qs[i], card->qdio.out_qs[i]->bufs[j], QETH_QDIO_BUF_EMPTY); }
card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->next_buf_to_fill = 0;
card->qdio.out_qs[i]->do_pack = 0;
atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
atomic_set(&card->qdio.out_qs[i]->state, QETH_OUT_Q_UNLOCKED); }
return 0; }
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) {
switch (link_type) { case QETH_LINK_TYPE_HSTR: return 2; default: return 1; } }
static void qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd, __u8 command, enum qeth_prot_versions prot) {
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
cmd->hdr.seqno = card->seqno.ipa;
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2) cmd->hdr.prim_version_no = 2;
else cmd->hdr.prim_version_no = 1;
cmd->hdr.param_count = 1;
cmd->hdr.prot_version = prot;
cmd->hdr.ipa_supported = 0;
cmd->hdr.ipa_enabled = 0; }
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) {
struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd;
iob = qeth_wait_for_buffer(&card->write);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
return iob; }
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, char prot_type) {
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); }
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param) {
int rc; char prot_type;
QETH_CARD_TEXT(card, 4, "sendipa");
if (card->options.layer2)
if (card->info.type == QETH_CARD_TYPE_OSN) prot_type = QETH_PROT_OSN2;
else prot_type = QETH_PROT_LAYER2;
else prot_type = QETH_PROT_TCPIP;
qeth_prepare_ipa_cmd(card, iob, prot_type);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param);
if (rc == -ETIME) { qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); }
return rc; }
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
int qeth_send_startlan(struct qeth_card *card) {
int rc; struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "strtlan");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc; }
EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) {
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
return 0; }
static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) {
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); }
card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); }
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen) {
struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
cmd->data.setadapterparms.hdr.command_code = command;
cmd->data.setadapterparms.hdr.used_total = 1;
cmd->data.setadapterparms.hdr.seq_no = 1;
return iob; }
int qeth_query_setadapterparms(struct qeth_card *card) {
int rc; struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms));
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc; }
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
static int qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) {
struct qeth_ipa_cmd *cmd;
QETH_DBF_TEXT(SETUP, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
return 0;
default:
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " "rc=%d\n", dev_name(&card->gdev->dev), cmd->hdr.return_code);
return 0; } }
if (cmd->hdr.prot_version == QETH_PROT_IPV4) { card->options.ipa4.supported_funcs =
cmd->hdr.ipa_supported; card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; } else QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" "\n", dev_name(&card->gdev->dev)); return 0; } int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; __u16 rc; cmd = (struct qeth_ipa_cmd *)data; rc = cmd->hdr.return_code; if (rc) QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); else card->info.diagass_support = cmd->data.diagass.ext; return 0; } static int qeth_query_setdiagass(struct qeth_card *card) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(SETUP, 2, "qdiagass"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); } static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) { unsigned long info = get_zeroed_page(GFP_KERNEL); struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; struct ccw_dev_id ccwid; int level; tid->chpid = card->info.chpid; ccw_device_get_id(CARD_RDEV(card), &ccwid); tid->ssid = ccwid.ssid; tid->devno = ccwid.devno; if (!info) return; level = stsi(NULL, 0, 0, 0); if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) tid->lparnr = info222->lpar_number; if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); } free_page(info); return; } static int qeth_hw_trap_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; __u16 rc; cmd = (struct qeth_ipa_cmd *)data; rc = cmd->hdr.return_code; if (rc) QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); return 0; } int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(SETUP, 2, "diagtrap"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 80; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; cmd->data.diagass.type = 1; cmd->data.diagass.action = action; switch (action) { case QETH_DIAGS_TRAP_ARM: cmd->data.diagass.options = 0x0003; cmd->data.diagass.ext = 0x00010000 + sizeof(struct qeth_trap_id); qeth_get_trap_id(card, (struct qeth_trap_id *)cmd->data.diagass.cdata); break; case QETH_DIAGS_TRAP_DISARM: cmd->data.diagass.options = 0x0001; break; case QETH_DIAGS_TRAP_CAPTURE: break; } return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); } EXPORT_SYMBOL_GPL(qeth_hw_trap); int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, unsigned int qdio_error, const char *dbftext) { if (qdio_error) { QETH_CARD_TEXT(card, 2, dbftext); QETH_CARD_TEXT_(card, 2, 
" F15=%02X", buf->element[15].sflags); QETH_CARD_TEXT_(card, 2, " F14=%02X", buf->element[14].sflags); QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); if ((buf->element[15].sflags) == 0x12) { card->stats.rx_dropped++; return 0; } else return 1; } return 0; } EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); void qeth_buffer_reclaim_work(struct work_struct *work) { struct qeth_card *card = container_of(work, struct qeth_card, buffer_reclaim_work.work); QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); qeth_queue_input_buffer(card, card->reclaim_index); } void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; struct list_head *lh; int count; int i; int rc; int newcount = 0; count = (index < queue->next_buf_to_init)? card->qdio.in_buf_pool.buf_count - (queue->next_buf_to_init - index) : card->qdio.in_buf_pool.buf_count - (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); /* only requeue at a certain threshold to avoid SIGAs */ if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { for (i = queue->next_buf_to_init; i < queue->next_buf_to_init + count; ++i) { if (qeth_init_input_buffer(card, &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { break; } else { newcount++; } } if (newcount < count) { /* we are in memory shortage so we switch back to traditional skb allocation and drop packages */ atomic_set(&card->force_alloc_skb, 3); count = newcount; } else { atomic_add_unless(&card->force_alloc_skb, -1, 0); } if (!count) { i = 0; list_for_each(lh, &card->qdio.in_buf_pool.entry_list) i++; if (i == card->qdio.in_buf_pool.buf_count) { QETH_CARD_TEXT(card, 2, "qsarbw"); card->reclaim_index = index; schedule_delayed_work( &card->buffer_reclaim_work, QETH_RECLAIM_WORK_TIME); } return; } /* * according to old code it should be avoided to requeue all * 128 buffers in order to benefit from PCI avoidance. * this function keeps at least one buffer (the buffer at * 'index') un-requeued -> this buffer is the first buffer that * will be requeued the next time */ if (card->options.performance_stats) { card->perf_stats.inbound_do_qdio_cnt++; card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros(); } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, queue->next_buf_to_init, count); if (card->options.performance_stats) card->perf_stats.inbound_do_qdio_time += qeth_get_micros() - card->perf_stats.inbound_do_qdio_start_time; if (rc) { QETH_CARD_TEXT(card, 2, "qinberr"); } queue->next_buf_to_init = (queue->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; } } EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); static int qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { int sbalf15 = buffer->buffer->element[15].sflags; QETH_CARD_TEXT(card, 6, "hdsnderr"); if (card->info.type == QETH_CARD_TYPE_IQD) { if (sbalf15 == 0) { qdio_err = 0; } else { qdio_err = 1; } } qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); if (!qdio_err) return QETH_SEND_ERROR_NONE; if ((sbalf15 >= 15) && (sbalf15 <= 31)) return QETH_SEND_ERROR_RETRY; QETH_CARD_TEXT(card, 1, "lnkfail"); QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15); return QETH_SEND_ERROR_LINK_FAILURE; } /* * Switched to packing state if the number of used buffers on a queue * reaches a certain limit. 
*/
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) {
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers) >= QETH_HIGH_WATERMARK_PACK) {
/* switch non-PACKING -> PACKING */
QETH_CARD_TEXT(queue->card, 6, "np->pack");
if (queue->card->options.performance_stats) queue->card->perf_stats.sc_dp_p++;
queue->do_pack = 1; } } }
/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) {
struct qeth_qdio_out_buffer *buffer;
int flush_count = 0;
if (queue->do_pack) {
if (atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_CARD_TEXT(queue->card, 6, "pack->np");
if (queue->card->options.performance_stats) queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
/* flush packing buffers */
buffer = queue->bufs[queue->next_buf_to_fill];
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && (buffer->next_element_to_fill > 0)) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; } } }
return flush_count; }
/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) {
struct qeth_qdio_out_buffer *buffer;
buffer = queue->bufs[queue->next_buf_to_fill];
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && (buffer->next_element_to_fill > 0)) {
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
return 1; }
return 0; }
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) {
struct qeth_qdio_out_buffer *buf;
int rc; int i; unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |= SBAL_EFLAGS_LAST_ENTRY;
if (queue->bufstates) queue->bufstates[bidx].user = buf;
if (queue->card->info.type == QETH_CARD_TYPE_IQD) continue;
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >= (QETH_HIGH_WATERMARK_PACK - QETH_WATERMARK_PACK_FUZZ)) && !atomic_read(&queue->set_pci_flags_count)) {
/* it's likely that we'll go to packing
 * mode soon */
atomic_inc(&queue->set_pci_flags_count);
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; }
} else {
if (!atomic_read(&queue->set_pci_flags_count)) {
/*
 * there's no outstanding PCI any more, so we
 * have to request a PCI to be sure that the PCI
 * will wake at some time in the future; then we
 * can flush packed buffers that might still be
 * hanging around, which can happen if no
 * further send was requested by the stack
 */
atomic_inc(&queue->set_pci_flags_count);
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; } } }
queue->card->dev->trans_start = jiffies;
if (queue->card->options.performance_stats) {
queue->card->perf_stats.outbound_do_qdio_cnt++;
queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros(); }
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT;
rc =
do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count);
if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time;
atomic_add(count, &queue->used_buffers);
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS) return;
QETH_CARD_TEXT(queue->card, 2, "flushbuf");
QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. if it
 * happens something is really wrong -> recover */
qeth_schedule_recovery(queue->card);
return; }
if (queue->card->options.performance_stats) queue->card->perf_stats.bufs_sent += count; }
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) {
int index; int flush_cnt = 0; int q_was_packing = 0;
/*
 * check if we have to switch to non-packing mode or if
 * we have to get a pci flag out on the queue
 */
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || !atomic_read(&queue->set_pci_flags_count)) {
if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == QETH_OUT_Q_UNLOCKED) {
/*
 * If we get in here, there was no action in
 * do_send_packet. So, we check if there is a
 * packing buffer to be flushed here.
 */
netif_stop_queue(queue->card->dev);
index = queue->next_buf_to_fill;
q_was_packing = queue->do_pack;
/* queue->do_pack may change */
barrier();
flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) flush_cnt += qeth_flush_buffers_on_no_pci(queue);
if (queue->card->options.performance_stats && q_was_packing) queue->card->perf_stats.bufs_sent_pack += flush_cnt;
if (flush_cnt) qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } } }
void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, unsigned long card_ptr) {
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev && (card->dev->flags & IFF_UP)) napi_schedule(&card->napi); }
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) {
int rc;
if (card->options.cq == QETH_CQ_NOTAVAILABLE) { rc = -1; goto out; }
else {
if (card->options.cq == cq) { rc = 0; goto out; }
if (card->state != CARD_STATE_DOWN && card->state != CARD_STATE_RECOVER) { rc = -1; goto out; }
qeth_free_qdio_buffers(card);
card->options.cq = cq;
rc = 0; }
out: return rc; }
EXPORT_SYMBOL_GPL(qeth_configure_cq);
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, unsigned int queue, int first_element, int count) {
struct qeth_qdio_q *cq = card->qdio.c_q;
int i; int rc;
if (!qeth_is_cq(card, queue)) goto out;
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
if (qdio_err) {
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
goto out; }
if (card->options.performance_stats) {
card->perf_stats.cq_cnt++;
card->perf_stats.cq_start_time = qeth_get_micros(); }
for (i = first_element; i < first_element + count; ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
int e;
e = 0;
while (buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
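/* each used element of a CQ buffer carries the address of an
 * asynchronous operation block (aob) describing a completed
 * pending TX buffer; hand it to the aob handler, then reset
 * the element for reuse */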
qeth_qdio_handle_aob(card, phys_aob_addr); buffer->element[e].addr = NULL; buffer->element[e].eflags = 0; buffer->element[e].sflags = 0; buffer->element[e].length = 0; ++e; } buffer->element[15].eflags = 0; buffer->element[15].sflags = 0; } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, card->qdio.c_q->next_buf_to_init, count); if (rc) { dev_warn(&card->gdev->dev, "QDIO reported an error, rc=%i\n", rc); QETH_CARD_TEXT(card, 2, "qcqherr"); } card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; netif_wake_queue(card->dev); if (card->options.performance_stats) { int delta_t = qeth_get_micros(); delta_t -= card->perf_stats.cq_start_time; card->perf_stats.cq_time += delta_t; } out: return; } void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, unsigned int queue, int first_elem, int count, unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *)card_ptr; QETH_CARD_TEXT_(card, 2, "qihq%d", queue); QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); if (qeth_is_cq(card, queue)) qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); else if (qdio_err) qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_qdio_input_handler); void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int qdio_error, int __queue, int first_element, int count, unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_buffer *buffer; int i; QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_FATAL) { QETH_CARD_TEXT(card, 2, "achkcond"); netif_stop_queue(card->dev); qeth_schedule_recovery(card); return; } if (card->options.performance_stats) { card->perf_stats.outbound_handler_cnt++; card->perf_stats.outbound_handler_start_time = qeth_get_micros(); } for (i = first_element; i < (first_element + count); ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[bidx]; qeth_handle_send_error(card, buffer, qdio_error); if (queue->bufstates && (queue->bufstates[bidx].flags & QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, QETH_QDIO_BUF_PENDING) == QETH_QDIO_BUF_PRIMED) { qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); } buffer->aob = queue->bufstates[bidx].aob; QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); QETH_CARD_TEXT(queue->card, 5, "aob"); QETH_CARD_TEXT_(queue->card, 5, "%lx", virt_to_phys(buffer->aob)); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); } } else { if (card->options.cq == QETH_CQ_ENABLED) { enum iucv_tx_notify n; n = qeth_compute_cq_notification( buffer->buffer->element[15].sflags, 0); qeth_notify_skbs(queue, buffer, n); } qeth_clear_output_buffer(queue, buffer, QETH_QDIO_BUF_EMPTY); } qeth_cleanup_handled_pending(queue, bidx, 0); } atomic_sub(count, &queue->used_buffers); /* check if we need to do something on this outbound queue */ if (card->info.type != QETH_CARD_TYPE_IQD) qeth_check_outbound_queue(queue); netif_wake_queue(queue->card->dev); if (card->options.performance_stats) card->perf_stats.outbound_handler_time += qeth_get_micros() - card->perf_stats.outbound_handler_start_time; } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD || card->info.type == 
QETH_CARD_TYPE_OSX)) return card->qdio.default_out_queue; switch (card->qdio.no_out_queues) { case 4: if (cast_type && card->info.is_multicast_different) return card->info.is_multicast_different & (card->qdio.no_out_queues - 1); if (card->qdio.do_prio_queueing && (ipv == 4)) { const u8 tos = ip_hdr(skb)->tos; if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_TOS) { if (tos & IP_TOS_NOTIMPORTANT) return 3; if (tos & IP_TOS_HIGHRELIABILITY) return 2; if (tos & IP_TOS_HIGHTHROUGHPUT) return 1; if (tos & IP_TOS_LOWDELAY) return 0; } if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) return 3 - (tos >> 6); } else if (card->qdio.do_prio_queueing && (ipv == 6)) { /* TODO: IPv6!!! */ } return card->qdio.default_out_queue; case 1: /* fallthrough for single-out-queue 1920-device */ default: return card->qdio.default_out_queue; } } EXPORT_SYMBOL_GPL(qeth_get_priority_queue); int qeth_get_elements_for_frags(struct sk_buff *skb) { int cnt, length, e, elements = 0; struct skb_frag_struct *frag; char *data; for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { frag = &skb_shinfo(skb)->frags[cnt]; data = (char *)page_to_phys(skb_frag_page(frag)) + frag->page_offset; length = frag->size; e = PFN_UP((unsigned long)data + length - 1) - PFN_DOWN((unsigned long)data); elements += e; } return elements; } EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int elems) { int dlen = skb->len - skb->data_len; int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - PFN_DOWN((unsigned long)skb->data); elements_needed += qeth_get_elements_for_frags(skb); if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). Discarded.\n", (elements_needed+elems), skb->len); return 0; } return elements_needed; } EXPORT_SYMBOL_GPL(qeth_get_elements_no); int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) { int hroom, inpage, rest; if (((unsigned long)skb->data & PAGE_MASK) != (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { hroom = skb_headroom(skb); inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); rest = len - inpage; if (rest > hroom) return 1; memmove(skb->data - rest, skb->data, skb->len - skb->data_len); skb->data -= rest; skb->tail -= rest; *hdr = (struct qeth_hdr *)skb->data; QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); } return 0; } EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); static inline void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, int offset) { int length = skb->len - skb->data_len; int length_here; int element; char *data; int first_lap, cnt; struct skb_frag_struct *frag; element = *next_element_to_fill; data = skb->data; first_lap = (is_tso == 0 ? 
1 : 0);
if (offset >= 0) {
data = skb->data + offset;
length -= offset;
first_lap = 0; }
while (length > 0) {
/* length_here is the remaining amount of data in this page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
if (length < length_here) length_here = length;
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
if (!length) {
if (first_lap)
if (skb_shinfo(skb)->nr_frags) buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
else buffer->element[element].eflags = 0;
else buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
} else {
if (first_lap) buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
else buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; }
data += length_here;
element++;
first_lap = 0; }
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
data = (char *)page_to_phys(skb_frag_page(frag)) + frag->page_offset;
length = frag->size;
while (length > 0) {
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
if (length < length_here) length_here = length;
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
length -= length_here;
data += length_here;
element++; } }
if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
*next_element_to_fill = element; }
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, int offset, int hd_len) {
struct qdio_buffer *buffer;
int flush_cnt = 0, hdr_len, large_send = 0;
buffer = buf->buffer;
atomic_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/* check first on TSO ... */
if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
int element = buf->next_element_to_fill;
hdr_len = sizeof(struct qeth_hdr_tso) + ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
/* fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
skb->data += hdr_len;
skb->len -= hdr_len;
large_send = 1; }
if (offset >= 0) {
int element = buf->next_element_to_fill;
buffer->element[element].addr = hdr;
buffer->element[element].length = sizeof(struct qeth_hdr) + hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->is_header[element] = 1;
buf->next_element_to_fill++; }
__qeth_fill_buffer(skb, buffer, large_send, (int *)&buf->next_element_to_fill, offset);
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt = 1;
} else {
QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats) queue->card->perf_stats.skbs_sent_pack++;
if (buf->next_element_to_fill >= QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
 * packed buffer is full -> set state PRIMED
 * -> will be flushed
 */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt = 1; } }
return flush_cnt; }
int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, int offset, int hd_len) {
struct qeth_qdio_out_buffer *buffer;
int index;
/* spin until we get the queue ...
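 * (a short busy-wait: the holder only picks a buffer and advances the
 * fill index before unlocking, so the lock is held very briefly)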
*/ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); /* ... now we've got the queue */ index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill]; /* * check if buffer is empty to make sure that we do not 'overtake' * ourselves and try to fill a buffer that is already primed */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) goto out; queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_flush_buffers(queue, index, 1); return 0; out: atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; } EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed) { struct qeth_qdio_out_buffer *buffer; int start_index; int flush_count = 0; int do_pack = 0; int tmp; int rc = 0; /* spin until we get the queue ... */ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill]; /* * check if buffer is empty to make sure that we do not 'overtake' * ourselves and try to fill a buffer that is already primed */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; } /* check if we need to switch packing state of this queue */ qeth_switch_to_packing_if_needed(queue); if (queue->do_pack) { do_pack = 1; /* does packet fit in current buffer? */ if ((QETH_MAX_BUFFER_ELEMENTS(card) - buffer->next_element_to_fill) < elements_needed) { /* ... no -> set state PRIMED */ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); flush_count++; queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[queue->next_buf_to_fill]; /* we did a step forward, so check buffer state * again */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { qeth_flush_buffers(queue, start_index, flush_count); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; } } } tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); /* * queue->state will go from LOCKED -> UNLOCKED or from * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us * (switch packing state or flush buffer to get another pci flag out). 
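 * The state values form a ladder (UNLOCKED < LOCKED < LOCKED_FLUSH), so the
 * atomic_dec_return() below walks the state back down one step per pass.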
* In that case we will enter this loop */ while (atomic_dec_return(&queue->state)) { flush_count = 0; start_index = queue->next_buf_to_fill; /* check if we can go back to non-packing state */ flush_count += qeth_switch_to_nonpacking_if_needed(queue); /* * check if we need to flush a packing buffer to get a pci * flag out on the queue */ if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) flush_count += qeth_flush_buffers_on_no_pci(queue); if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); } /* at this point the queue is UNLOCKED again */ if (queue->card->options.performance_stats && do_pack) queue->card->perf_stats.bufs_sent_pack += flush_count; return rc; } EXPORT_SYMBOL_GPL(qeth_do_send_packet); static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; struct qeth_ipacmd_setadpparms *setparms; QETH_CARD_TEXT(card, 4, "prmadpcb"); cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; return 0; } void qeth_setadp_promisc_mode(struct qeth_card *card) { enum qeth_ipa_promisc_modes mode; struct net_device *dev = card->dev; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "setprom"); if (((dev->flags & IFF_PROMISC) && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || (!(dev->flags & IFF_PROMISC) && (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) return; mode = SET_PROMISC_MODE_OFF; if (dev->flags & IFF_PROMISC) mode = SET_PROMISC_MODE_ON; QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, sizeof(struct qeth_ipacmd_setadpparms)); cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.mode = mode; qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); } EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); int qeth_change_mtu(struct net_device *dev, int new_mtu) { struct qeth_card *card; char dbf_text[15]; card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "chgmtu"); sprintf(dbf_text, "%8x", new_mtu); QETH_CARD_TEXT(card, 4, dbf_text); if (new_mtu < 64) return -EINVAL; if (new_mtu > 65535) return -EINVAL; if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) && (!qeth_mtu_is_valid(card, new_mtu))) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL_GPL(qeth_change_mtu); struct net_device_stats *qeth_get_stats(struct net_device *dev) { struct qeth_card *card; card = dev->ml_priv; QETH_CARD_TEXT(card, 5, "getstat"); return &card->stats; } EXPORT_SYMBOL_GPL(qeth_get_stats); static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "chgmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { memcpy(card->dev->dev_addr, &cmd->data.setadapterparms.data.change_addr.addr, OSA_ADDR_LEN); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } int qeth_setadpparms_change_macaddr(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, 
IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; memcpy(&cmd->data.setadapterparms.data.change_addr.addr, card->dev->dev_addr, OSA_ADDR_LEN); rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", cmd->data.setadapterparms.hdr.return_code); if (cmd->data.setadapterparms.hdr.return_code != SET_ACCESS_CTRL_RC_SUCCESS) QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", card->gdev->dev.kobj.name, access_ctrl_req->subcmd_code, cmd->data.setadapterparms.hdr.return_code); switch (cmd->data.setadapterparms.hdr.return_code) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, "QDIO data connection isolation is deactivated\n"); } else { dev_info(&card->gdev->dev, "QDIO data connection isolation is activated\n"); } break; case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " "deactivated\n", dev_name(&card->gdev->dev)); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" " activated\n", dev_name(&card->gdev->dev)); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); break; case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: dev_err(&card->gdev->dev, "Adapter is dedicated. 
" "QDIO data connection isolation not supported\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: dev_err(&card->gdev->dev, "TSO does not permit QDIO data connection isolation\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: dev_err(&card->gdev->dev, "The adjacent switch port does not " "support reflective relay mode\n"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_FAILED: dev_err(&card->gdev->dev, "The reflective relay mode cannot be " "enabled at the adjacent switch port"); if (fallback) card->options.isolation = card->options.prev_isolation; break; case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: dev_warn(&card->gdev->dev, "Turning off reflective relay mode " "at the adjacent switch failed\n"); break; default: /* this should never happen */ if (fallback) card->options.isolation = card->options.prev_isolation; break; } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, enum qeth_ipa_isolation_modes isolation, int fallback) { int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; QETH_CARD_TEXT(card, 4, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_set_access_ctrl)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; access_ctrl_req->subcmd_code = isolation; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, &fallback); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); return rc; } int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) { int rc = 0; QETH_CARD_TEXT(card, 4, "setactlo"); if ((card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) && qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", card->gdev->dev.kobj.name, rc); rc = -EOPNOTSUPP; } } else if (card->options.isolation != ISOLATION_MODE_NONE) { card->options.isolation = ISOLATION_MODE_NONE; dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); rc = -EOPNOTSUPP; } return rc; } EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); void qeth_tx_timeout(struct net_device *dev) { struct qeth_card *card; card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); card->stats.tx_errors++; qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) { struct qeth_card *card = dev->ml_priv; int rc = 0; switch (regnum) { case MII_BMCR: /* Basic mode control register */ rc = BMCR_FULLDPLX; if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && (card->info.link_type != QETH_LINK_TYPE_OSN) && (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) rc |= BMCR_SPEED100; break; case MII_BMSR: /* Basic mode status register */ rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | BMSR_100BASE4; break; case MII_PHYSID1: /* PHYS ID 1 */ rc = 
(dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | dev->dev_addr[2]; rc = (rc >> 5) & 0xFFFF; break; case MII_PHYSID2: /* PHYS ID 2 */ rc = (dev->dev_addr[2] << 10) & 0xFFFF; break; case MII_ADVERTISE: /* Advertisement control reg */ rc = ADVERTISE_ALL; break; case MII_LPA: /* Link partner ability reg */ rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | LPA_100BASE4 | LPA_LPACK; break; case MII_EXPANSION: /* Expansion register */ break; case MII_DCOUNTER: /* disconnect counter */ break; case MII_FCSCOUNTER: /* false carrier counter */ break; case MII_NWAYTEST: /* N-way auto-neg test register */ break; case MII_RERRCOUNTER: /* rx error counter */ rc = card->stats.rx_errors; break; case MII_SREVISION: /* silicon revision */ break; case MII_RESV1: /* reserved 1 */ break; case MII_LBRERROR: /* loopback, rx, bypass error */ break; case MII_PHYADDR: /* physical address */ break; case MII_RESV2: /* reserved 2 */ break; case MII_TPISTATUS: /* TPI status for 10mbps */ break; case MII_NCONFIG: /* network interface config */ break; default: break; } return rc; } EXPORT_SYMBOL_GPL(qeth_mdio_read); static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int len, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *reply_param) { u16 s1, s2; QETH_CARD_TEXT(card, 4, "sendsnmp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); /* adjust PDU length fields in IPA_PDU_HEADER */ s1 = (u32) IPA_PDU_HEADER_SIZE + len; s2 = (u32) len; memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, reply_cb, reply_param); } static int qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long sdata) { struct qeth_ipa_cmd *cmd; struct qeth_arp_query_info *qinfo; struct qeth_snmp_cmd *snmp; unsigned char *data; __u16 data_len; QETH_CARD_TEXT(card, 3, "snpcmdcb"); cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); qinfo = (struct qeth_arp_query_info *) reply->param; snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); return 0; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); if (cmd->data.setadapterparms.hdr.seq_no == 1) data_len -= (__u16)((char *)&snmp->data - (char *)cmd); else data_len -= (__u16)((char *)&snmp->request - (char *)cmd); /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); cmd->hdr.return_code = IPA_RC_ENOMEM; return 0; } QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ if (cmd->data.setadapterparms.hdr.seq_no == 1) { memcpy(qinfo->udata + qinfo->udata_offset, (char *)snmp, data_len + offsetof(struct qeth_snmp_cmd, data)); qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data); } else { memcpy(qinfo->udata + qinfo->udata_offset, (char *)&snmp->request, 
data_len); } qinfo->udata_offset += data_len; /* check if all replies received ... */ QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); QETH_CARD_TEXT_(card, 4, "srseq%i", cmd->data.setadapterparms.hdr.seq_no); if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) return 1; return 0; } int qeth_snmp_command(struct qeth_card *card, char __user *udata) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_snmp_ureq *ureq; int req_len; struct qeth_arp_query_info qinfo = {0, }; int rc = 0; QETH_CARD_TEXT(card, 3, "snmpcmd"); if (card->info.guestlan) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && (!card->options.layer2)) { return -EOPNOTSUPP; } /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); if (IS_ERR(ureq)) { QETH_CARD_TEXT(card, 2, "snmpnome"); return PTR_ERR(ureq); } qinfo.udata_len = ureq->hdr.data_len; qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); if (!qinfo.udata) { kfree(ureq); return -ENOMEM; } qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, qeth_snmp_command_cb, (void *)&qinfo); if (rc) QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", QETH_CARD_IFNAME(card), rc); else { if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; } kfree(ureq); kfree(qinfo.udata); return rc; } EXPORT_SYMBOL_GPL(qeth_snmp_command); static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; if (resdatalen > (priv->buffer_len - priv->response_len)) { cmd->hdr.return_code = IPA_RC_FFFF; return 0; } memcpy((priv->buffer + priv->response_len), resdata, resdatalen); priv->response_len += resdatalen; if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) return 1; return 0; } int qeth_query_oat_command(struct qeth_card *card, char __user *udata) { int rc = 0; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_query_oat *oat_req; struct qeth_query_oat_data oat_data; struct qeth_qoat_priv priv; void __user *tmp; QETH_CARD_TEXT(card, 3, "qoatcmd"); if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { rc = -EOPNOTSUPP; goto out; } if (copy_from_user(&oat_data, udata, sizeof(struct qeth_query_oat_data))) { rc = -EFAULT; goto out; } priv.buffer_len = oat_data.buffer_len; priv.response_len = 0; priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); if (!priv.buffer) { rc = -ENOMEM; goto out; } iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_query_oat)); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, 
&priv); if (!rc) { if (is_compat_task()) tmp = compat_ptr(oat_data.ptr); else tmp = (void __user *)(unsigned long)oat_data.ptr; if (copy_to_user(tmp, priv.buffer, priv.response_len)) { rc = -EFAULT; goto out_free; } oat_data.response_len = priv.response_len; if (copy_to_user(udata, &oat_data, sizeof(struct qeth_query_oat_data))) rc = -EFAULT; } else if (rc == IPA_RC_FFFF) rc = -EFAULT; out_free: kfree(priv.buffer); out: return rc; } EXPORT_SYMBOL_GPL(qeth_query_oat_command); static inline int qeth_get_qdio_q_format(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_IQD: return 2; default: return 0; } } static void qeth_determine_capabilities(struct qeth_card *card) { int rc; int length; char *prcd; struct ccw_device *ddev; int ddev_offline = 0; QETH_DBF_TEXT(SETUP, 2, "detcapab"); ddev = CARD_DDEV(card); if (!ddev->online) { ddev_offline = 1; rc = ccw_device_set_online(ddev); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out; } } rc = qeth_read_conf_data(card, (void **) &prcd, &length); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", dev_name(&card->gdev->dev), rc); QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_offline; } qeth_configure_unitaddr(card, prcd); if (ddev_offline) qeth_configure_blkt_default(card, prcd); kfree(prcd); rc = qdio_get_ssqd_desc(ddev, &card->ssqd); if (rc) QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1); QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3); QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { dev_info(&card->gdev->dev, "Completion Queueing supported\n"); } else { card->options.cq = QETH_CQ_NOTAVAILABLE; } out_offline: if (ddev_offline == 1) ccw_device_set_offline(ddev); out: return; } static inline void qeth_qdio_establish_cq(struct qeth_card *card, struct qdio_buffer **in_sbal_ptrs, void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) { int i; if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[offset + i] = (struct qdio_buffer *) virt_to_phys(card->qdio.c_q->bufs[i].buffer); } queue_start_poll[card->qdio.no_in_queues - 1] = NULL; } } static int qeth_qdio_establish(struct qeth_card *card) { struct qdio_initialize init_data; char *qib_param_field; struct qdio_buffer **in_sbal_ptrs; void (**queue_start_poll) (struct ccw_device *, int, unsigned long); struct qdio_buffer **out_sbal_ptrs; int i, j, k; int rc = 0; QETH_DBF_TEXT(SETUP, 2, "qdioest"); qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), GFP_KERNEL); if (!qib_param_field) { rc = -ENOMEM; goto out_free_nothing; } qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); in_sbal_ptrs = kzalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!in_sbal_ptrs) { rc = -ENOMEM; goto out_free_qib_param; } for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[i] = (struct qdio_buffer *) virt_to_phys(card->qdio.in_q->bufs[i].buffer); } queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues, GFP_KERNEL); if (!queue_start_poll) { rc = -ENOMEM; goto out_free_in_sbals; } for (i = 0; i < 
card->qdio.no_in_queues; ++i) queue_start_poll[i] = card->discipline->start_poll; qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); out_sbal_ptrs = kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!out_sbal_ptrs) { rc = -ENOMEM; goto out_free_queue_start_poll; } for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( card->qdio.out_qs[i]->bufs[j]->buffer); } memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); init_data.q_format = qeth_get_qdio_q_format(card); init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; init_data.no_input_qs = card->qdio.no_in_queues; init_data.no_output_qs = card->qdio.no_out_queues; init_data.input_handler = card->discipline->input_handler; init_data.output_handler = card->discipline->output_handler; init_data.queue_start_poll_array = queue_start_poll; init_data.int_parm = (unsigned long) card; init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; init_data.scan_threshold = (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { rc = qdio_allocate(&init_data); if (rc) { atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); goto out; } rc = qdio_establish(&init_data); if (rc) { atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); qdio_free(CARD_DDEV(card)); } } switch (card->options.cq) { case QETH_CQ_ENABLED: dev_info(&card->gdev->dev, "Completion Queue support enabled"); break; case QETH_CQ_DISABLED: dev_info(&card->gdev->dev, "Completion Queue support disabled"); break; default: break; } out: kfree(out_sbal_ptrs); out_free_queue_start_poll: kfree(queue_start_poll); out_free_in_sbals: kfree(in_sbal_ptrs); out_free_qib_param: kfree(qib_param_field); out_free_nothing: return rc; } static void qeth_core_free_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "freecrd"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); if (card->dev) free_netdev(card->dev); kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); } void qeth_trace_features(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "features"); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs); QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support); } EXPORT_SYMBOL_GPL(qeth_trace_features); static struct ccw_device_id qeth_ids[] = { {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), .driver_info = QETH_CARD_TYPE_OSD}, {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), .driver_info = QETH_CARD_TYPE_IQD}, {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), .driver_info = QETH_CARD_TYPE_OSN}, {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), .driver_info = QETH_CARD_TYPE_OSM}, {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), .driver_info = QETH_CARD_TYPE_OSX}, {}, }; MODULE_DEVICE_TABLE(ccw, 
qeth_ids); static struct ccw_driver qeth_ccw_driver = { .driver = { .owner = THIS_MODULE, .name = "qeth", }, .ids = qeth_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, }; int qeth_core_hardsetup_card(struct qeth_card *card) { int retries = 3; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); qeth_update_from_chp_desc(card); retry: if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_WDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) goto retriable; rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); retriable: if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); if (--retries < 0) goto out; else goto retry; } qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); if (--retries < 0) goto out; else goto retry; } rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); if (--retries < 0) goto out; else goto retry; } card->read_or_write_problem = 0; rc = qeth_mpc_initialize(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out; } card->options.ipa4.supported_funcs = 0; card->options.adp.supported_funcs = 0; card->info.diagass_support = 0; qeth_query_ipassists(card, QETH_PROT_IPV4); if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) qeth_query_setadapterparms(card); if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) qeth_query_setdiagass(card); return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! 
rc=%d\n", dev_name(&card->gdev->dev), rc); return rc; } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element *element, struct sk_buff **pskb, int offset, int *pfrag, int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { if (qethbuffer->rx_skb) { /* only if qeth_card.options.cq == QETH_CQ_ENABLED */ *pskb = qethbuffer->rx_skb; qethbuffer->rx_skb = NULL; } else { *pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN); if (!(*pskb)) return -ENOMEM; } skb_reserve(*pskb, ETH_HLEN); if (data_len <= QETH_RX_PULL_LEN) { memcpy(skb_put(*pskb, data_len), element->addr + offset, data_len); } else { get_page(page); memcpy(skb_put(*pskb, QETH_RX_PULL_LEN), element->addr + offset, QETH_RX_PULL_LEN); skb_fill_page_desc(*pskb, *pfrag, page, offset + QETH_RX_PULL_LEN, data_len - QETH_RX_PULL_LEN); (*pskb)->data_len += data_len - QETH_RX_PULL_LEN; (*pskb)->len += data_len - QETH_RX_PULL_LEN; (*pskb)->truesize += data_len - QETH_RX_PULL_LEN; (*pfrag)++; } } else { get_page(page); skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len); (*pskb)->data_len += data_len; (*pskb)->len += data_len; (*pskb)->truesize += data_len; (*pfrag)++; } return 0; } struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) { struct qdio_buffer_element *element = *__element; struct qdio_buffer *buffer = qethbuffer->buffer; int offset = *__offset; struct sk_buff *skb = NULL; int skb_len = 0; void *data_ptr; int data_len; int headroom = 0; int use_rx_sg = 0; int frag = 0; /* qeth_hdr must not cross element boundaries */ if (element->length < offset + sizeof(struct qeth_hdr)) { if (qeth_is_last_sbale(element)) return NULL; element++; offset = 0; if (element->length < sizeof(struct qeth_hdr)) return NULL; } *hdr = element->addr + offset; offset += sizeof(struct qeth_hdr); switch ((*hdr)->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb_len = (*hdr)->hdr.l2.pkt_length; break; case QETH_HEADER_TYPE_LAYER3: skb_len = (*hdr)->hdr.l3.length; headroom = ETH_HLEN; break; case QETH_HEADER_TYPE_OSN: skb_len = (*hdr)->hdr.osn.pdu_length; headroom = sizeof(struct qeth_hdr); break; default: break; } if (!skb_len) return NULL; if (((skb_len >= card->options.rx_sg_cb) && (!(card->info.type == QETH_CARD_TYPE_OSN)) && (!atomic_read(&card->force_alloc_skb))) || (card->options.cq == QETH_CQ_ENABLED)) { use_rx_sg = 1; } else { skb = dev_alloc_skb(skb_len + headroom); if (!skb) goto no_mem; if (headroom) skb_reserve(skb, headroom); } data_ptr = element->addr + offset; while (skb_len) { data_len = min(skb_len, (int)(element->length - offset)); if (data_len) { if (use_rx_sg) { if (qeth_create_skb_frag(qethbuffer, element, &skb, offset, &frag, data_len)) goto no_mem; } else { memcpy(skb_put(skb, data_len), data_ptr, data_len); } } skb_len -= data_len; if (skb_len) { if (qeth_is_last_sbale(element)) { QETH_CARD_TEXT(card, 4, "unexeob"); QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); card->stats.rx_errors++; return NULL; } element++; offset = 0; data_ptr = element->addr; } else { offset += data_len; } } *__element = element; *__offset = offset; if (use_rx_sg && card->options.performance_stats) { card->perf_stats.sg_skbs_rx++; card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; } return skb; no_mem: if (net_ratelimit()) { QETH_CARD_TEXT(card, 2, "noskbmem"); } card->stats.rx_dropped++; 
return NULL; } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); static void qeth_unregister_dbf_views(void) { int x; for (x = 0; x < QETH_DBF_INFOS; x++) { debug_unregister(qeth_dbf[x].id); qeth_dbf[x].id = NULL; } } void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) { char dbf_txt_buf[32]; va_list args; if (level > id->level) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); va_end(args); debug_text_event(id, level, dbf_txt_buf); } EXPORT_SYMBOL_GPL(qeth_dbf_longtext); static int qeth_register_dbf_views(void) { int ret; int x; for (x = 0; x < QETH_DBF_INFOS; x++) { /* register the areas */ qeth_dbf[x].id = debug_register(qeth_dbf[x].name, qeth_dbf[x].pages, qeth_dbf[x].areas, qeth_dbf[x].len); if (qeth_dbf[x].id == NULL) { qeth_unregister_dbf_views(); return -ENOMEM; } /* register a view */ ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); if (ret) { qeth_unregister_dbf_views(); return ret; } /* set a passing level */ debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); } return 0; } int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { int rc = 0; mutex_lock(&qeth_mod_mutex); switch (discipline) { case QETH_DISCIPLINE_LAYER3: card->discipline = try_then_request_module( symbol_get(qeth_l3_discipline), "qeth_l3"); break; case QETH_DISCIPLINE_LAYER2: card->discipline = try_then_request_module( symbol_get(qeth_l2_discipline), "qeth_l2"); break; } if (!card->discipline) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); rc = -EINVAL; } mutex_unlock(&qeth_mod_mutex); return rc; } void qeth_core_free_discipline(struct qeth_card *card) { if (card->options.layer2) symbol_put(qeth_l2_discipline); else symbol_put(qeth_l3_discipline); card->discipline = NULL; } static const struct device_type qeth_generic_devtype = { .name = "qeth_generic", .groups = qeth_generic_attr_groups, }; static const struct device_type qeth_osn_devtype = { .name = "qeth_osn", .groups = qeth_osn_attr_groups, }; #define DBF_NAME_LEN 20 struct qeth_dbf_entry { char dbf_name[DBF_NAME_LEN]; debug_info_t *dbf_info; struct list_head dbf_list; }; static LIST_HEAD(qeth_dbf_list); static DEFINE_MUTEX(qeth_dbf_list_mutex); static debug_info_t *qeth_get_dbf_entry(char *name) { struct qeth_dbf_entry *entry; debug_info_t *rc = NULL; mutex_lock(&qeth_dbf_list_mutex); list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { if (strcmp(entry->dbf_name, name) == 0) { rc = entry->dbf_info; break; } } mutex_unlock(&qeth_dbf_list_mutex); return rc; } static int qeth_add_dbf_entry(struct qeth_card *card, char *name) { struct qeth_dbf_entry *new_entry; card->debug = debug_register(name, 2, 1, 8); if (!card->debug) { QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); goto err; } if (debug_register_view(card->debug, &debug_hex_ascii_view)) goto err_dbg; new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); if (!new_entry) goto err_dbg; strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); new_entry->dbf_info = card->debug; mutex_lock(&qeth_dbf_list_mutex); list_add(&new_entry->dbf_list, &qeth_dbf_list); mutex_unlock(&qeth_dbf_list_mutex); return 0; err_dbg: debug_unregister(card->debug); err: return -ENOMEM; } static void qeth_clear_dbf_list(void) { struct qeth_dbf_entry *entry, *tmp; mutex_lock(&qeth_dbf_list_mutex); list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { list_del(&entry->dbf_list); debug_unregister(entry->dbf_info); kfree(entry); } mutex_unlock(&qeth_dbf_list_mutex); } static int 
qeth_core_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card; struct device *dev; int rc; unsigned long flags; char dbf_name[DBF_NAME_LEN]; QETH_DBF_TEXT(SETUP, 2, "probedev"); dev = &gdev->dev; if (!get_device(dev)) return -ENODEV; QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); card = qeth_alloc_card(); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; goto err_dev; } snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", dev_name(&gdev->dev)); card->debug = qeth_get_dbf_entry(dbf_name); if (!card->debug) { rc = qeth_add_dbf_entry(card, dbf_name); if (rc) goto err_card; } card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; dev_set_drvdata(&gdev->dev, card); card->gdev = gdev; gdev->cdev[0]->handler = qeth_irq; gdev->cdev[1]->handler = qeth_irq; gdev->cdev[2]->handler = qeth_irq; rc = qeth_determine_card_type(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto err_card; } rc = qeth_setup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto err_card; } if (card->info.type == QETH_CARD_TYPE_OSN) gdev->dev.type = &qeth_osn_devtype; else gdev->dev.type = &qeth_generic_devtype; switch (card->info.type) { case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_OSM: rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); if (rc) goto err_card; rc = card->discipline->setup(card->gdev); if (rc) goto err_disc; case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSX: default: break; } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_add_tail(&card->list, &qeth_core_card_list.list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_determine_capabilities(card); return 0; err_disc: qeth_core_free_discipline(card); err_card: qeth_core_free_card(card); err_dev: put_device(dev); return rc; } static void qeth_core_remove_device(struct ccwgroup_device *gdev) { unsigned long flags; struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); if (card->discipline) { card->discipline->remove(gdev); qeth_core_free_discipline(card); } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_core_free_card(card); dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); return; } static int qeth_core_set_online(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; int def_discipline; if (!card->discipline) { if (card->info.type == QETH_CARD_TYPE_IQD) def_discipline = QETH_DISCIPLINE_LAYER3; else def_discipline = QETH_DISCIPLINE_LAYER2; rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; rc = card->discipline->setup(card->gdev); if (rc) goto err; } rc = card->discipline->set_online(gdev); err: return rc; } static int qeth_core_set_offline(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); return card->discipline->set_offline(gdev); } static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->shutdown) card->discipline->shutdown(gdev); } static int qeth_core_prepare(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->prepare) return card->discipline->prepare(gdev); return 0; } static void qeth_core_complete(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); 
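/* forward the PM "complete" callback to the bound discipline, if any */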
if (card->discipline && card->discipline->complete) card->discipline->complete(gdev); } static int qeth_core_freeze(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->freeze) return card->discipline->freeze(gdev); return 0; } static int qeth_core_thaw(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->thaw) return card->discipline->thaw(gdev); return 0; } static int qeth_core_restore(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline && card->discipline->restore) return card->discipline->restore(gdev); return 0; } static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .driver = { .owner = THIS_MODULE, .name = "qeth", }, .setup = qeth_core_probe_device, .remove = qeth_core_remove_device, .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, .prepare = qeth_core_prepare, .complete = qeth_core_complete, .freeze = qeth_core_freeze, .thaw = qeth_core_thaw, .restore = qeth_core_restore, }; static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; err = ccwgroup_create_dev(qeth_core_root_dev, &qeth_core_ccwgroup_driver, 3, buf); return err ? err : count; } static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store); static struct attribute *qeth_drv_attrs[] = { &driver_attr_group.attr, NULL, }; static struct attribute_group qeth_drv_attr_group = { .attrs = qeth_drv_attrs, }; static const struct attribute_group *qeth_drv_attr_groups[] = { &qeth_drv_attr_group, NULL, }; static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { /* 0 */{"rx skbs"}, {"rx buffers"}, {"tx skbs"}, {"tx buffers"}, {"tx skbs no packing"}, {"tx buffers no packing"}, {"tx skbs packing"}, {"tx buffers packing"}, {"tx sg skbs"}, {"tx sg frags"}, /* 10 */{"rx sg skbs"}, {"rx sg frags"}, {"rx sg page allocs"}, {"tx large kbytes"}, {"tx large count"}, {"tx pk state ch n->p"}, {"tx pk state ch p->n"}, {"tx pk watermark low"}, {"tx pk watermark high"}, {"queue 0 buffer usage"}, /* 20 */{"queue 1 buffer usage"}, {"queue 2 buffer usage"}, {"queue 3 buffer usage"}, {"rx poll time"}, {"rx poll count"}, {"rx do_QDIO time"}, {"rx do_QDIO count"}, {"tx handler time"}, {"tx handler count"}, {"tx time"}, /* 30 */{"tx count"}, {"tx do_QDIO time"}, {"tx do_QDIO count"}, {"tx csum"}, {"tx lin"}, {"cq handler count"}, {"cq handler time"} }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) { switch (stringset) { case ETH_SS_STATS: return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); void qeth_core_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct qeth_card *card = dev->ml_priv; data[0] = card->stats.rx_packets - card->perf_stats.initial_rx_packets; data[1] = card->perf_stats.bufs_rec; data[2] = card->stats.tx_packets - card->perf_stats.initial_tx_packets; data[3] = card->perf_stats.bufs_sent; data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets - card->perf_stats.skbs_sent_pack; data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; data[6] = card->perf_stats.skbs_sent_pack; data[7] = card->perf_stats.bufs_sent_pack; data[8] = card->perf_stats.sg_skbs_sent; data[9] = card->perf_stats.sg_frags_sent; data[10] = 
card->perf_stats.sg_skbs_rx; data[11] = card->perf_stats.sg_frags_rx; data[12] = card->perf_stats.sg_alloc_page_rx; data[13] = (card->perf_stats.large_send_bytes >> 10); data[14] = card->perf_stats.large_send_cnt; data[15] = card->perf_stats.sc_dp_p; data[16] = card->perf_stats.sc_p_dp; data[17] = QETH_LOW_WATERMARK_PACK; data[18] = QETH_HIGH_WATERMARK_PACK; data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers); data[20] = (card->qdio.no_out_queues > 1) ? atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; data[21] = (card->qdio.no_out_queues > 2) ? atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; data[22] = (card->qdio.no_out_queues > 3) ? atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; data[23] = card->perf_stats.inbound_time; data[24] = card->perf_stats.inbound_cnt; data[25] = card->perf_stats.inbound_do_qdio_time; data[26] = card->perf_stats.inbound_do_qdio_cnt; data[27] = card->perf_stats.outbound_handler_time; data[28] = card->perf_stats.outbound_handler_cnt; data[29] = card->perf_stats.outbound_time; data[30] = card->perf_stats.outbound_cnt; data[31] = card->perf_stats.outbound_do_qdio_time; data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; data[34] = card->perf_stats.tx_lin; data[35] = card->perf_stats.cq_cnt; data[36] = card->perf_stats.cq_time; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &qeth_ethtool_stats_keys, sizeof(qeth_ethtool_stats_keys)); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(qeth_core_get_strings); void qeth_core_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct qeth_card *card = dev->ml_priv; strlcpy(info->driver, card->options.layer2 ? 
"qeth_l2" : "qeth_l3", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); strlcpy(info->fw_version, card->info.mcl_level, sizeof(info->fw_version)); snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); int qeth_core_ethtool_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; else link_type = card->info.link_type; ecmd->transceiver = XCVR_INTERNAL; ecmd->supported = SUPPORTED_Autoneg; ecmd->advertising = ADVERTISED_Autoneg; ecmd->duplex = DUPLEX_FULL; ecmd->autoneg = AUTONEG_ENABLE; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_100; ecmd->port = PORT_TP; break; case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_1000; ecmd->port = PORT_FIBRE; break; case QETH_LINK_TYPE_10GBIT_ETH: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_10000; ecmd->port = PORT_FIBRE; break; default: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_10; ecmd->port = PORT_TP; } return 0; } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); static int __init qeth_core_init(void) { int rc; pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); INIT_LIST_HEAD(&qeth_dbf_list); rwlock_init(&qeth_core_card_list.rwlock); mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); rc = qeth_register_dbf_views(); if (rc) goto out_err; qeth_core_root_dev = root_device_register("qeth"); rc = PTR_RET(qeth_core_root_dev); if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; } qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); if (!qeth_qdio_outbuf_cache) { rc = -ENOMEM; goto cqslab_err; } rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; rc = 
ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; return 0; ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: kmem_cache_destroy(qeth_qdio_outbuf_cache); cqslab_err: kmem_cache_destroy(qeth_core_header_cache); slab_err: root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; } static void __exit qeth_core_exit(void) { qeth_clear_dbf_list(); destroy_workqueue(qeth_wq); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); kmem_cache_destroy(qeth_qdio_outbuf_cache); kmem_cache_destroy(qeth_core_header_cache); root_device_unregister(qeth_core_root_dev); qeth_unregister_dbf_views(); pr_info("core functions removed\n"); } module_init(qeth_core_init); module_exit(qeth_core_exit); MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); MODULE_DESCRIPTION("qeth core functions"); MODULE_LICENSE("GPL");
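/*
 * A note on qeth_snmp_command() in this (unpatched) sample: req_len is read
 * straight from user space and then used both as the memdup_user() length and
 * as the memcpy() length into the fixed-size IPA command buffer, with no
 * upper bound applied. A minimal sketch of the missing guard, assuming the
 * limit derives from QETH_BUFSIZE minus the IPA PDU and setadapterparms
 * headers (the exact expression is an assumption, not quoted from the
 * upstream patch):
 *
 *	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
 *		return -EFAULT;
 *	if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
 *		       sizeof(struct qeth_ipacmd_hdr) -
 *		       sizeof(struct qeth_ipacmd_setadpparms_hdr)))
 *		return -EINVAL;
 */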
/* ---- end of CrossVul sample: ./CrossVul/dataset_final_sorted/CWE-119/c/bad_5800_0 ---- */
/* ---- CrossVul sample: crossvul-cpp_data_good_2128_0 ---- */
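/*
 * The sample below is the "good" (patched) half of a CWE-119 pair: in
 * magicmouse_raw_event() the report size is validated before parsing, so a
 * report can never claim more than 15 touch records and overrun the 16-entry
 * touches[] and tracking_ids[] arrays in struct magicmouse_sc.
 */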
/* * Apple "Magic" Wireless Mouse driver * * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> * Copyright (c) 2010 Chase Douglas <chase.douglas@canonical.com> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/hid.h> #include <linux/input/mt.h> #include <linux/module.h> #include <linux/slab.h> #include "hid-ids.h" static bool emulate_3button = true; module_param(emulate_3button, bool, 0644); MODULE_PARM_DESC(emulate_3button, "Emulate a middle button"); static int middle_button_start = -350; static int middle_button_stop = +350; static bool emulate_scroll_wheel = true; module_param(emulate_scroll_wheel, bool, 0644); MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); static unsigned int scroll_speed = 32; static int param_set_scroll_speed(const char *val, struct kernel_param *kp) { unsigned long speed; if (!val || kstrtoul(val, 0, &speed) || speed > 63) return -EINVAL; scroll_speed = speed; return 0; } module_param_call(scroll_speed, param_set_scroll_speed, param_get_uint, &scroll_speed, 0644); MODULE_PARM_DESC(scroll_speed, "Scroll speed, value from 0 (slow) to 63 (fast)"); static bool scroll_acceleration = false; module_param(scroll_acceleration, bool, 0644); MODULE_PARM_DESC(scroll_acceleration, "Accelerate sequential scroll events"); static bool report_undeciphered; module_param(report_undeciphered, bool, 0644); MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); #define TRACKPAD_REPORT_ID 0x28 #define MOUSE_REPORT_ID 0x29 #define DOUBLE_REPORT_ID 0xf7 /* These definitions are not precise, but they're close enough. (Bits * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem * to be some kind of bit mask -- 0x20 may be a near-field reading, * and 0x40 is actual contact, and 0x10 may be a start/stop or change * indication.) */ #define TOUCH_STATE_MASK 0xf0 #define TOUCH_STATE_NONE 0x00 #define TOUCH_STATE_START 0x30 #define TOUCH_STATE_DRAG 0x40 #define SCROLL_ACCEL_DEFAULT 7 /* Touch surface information. Dimension is in hundredths of a mm, min and max * are in units. */ #define MOUSE_DIMENSION_X (float)9056 #define MOUSE_MIN_X -1100 #define MOUSE_MAX_X 1258 #define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) #define MOUSE_DIMENSION_Y (float)5152 #define MOUSE_MIN_Y -1589 #define MOUSE_MAX_Y 2047 #define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) #define TRACKPAD_DIMENSION_X (float)13000 #define TRACKPAD_MIN_X -2909 #define TRACKPAD_MAX_X 3167 #define TRACKPAD_RES_X \ ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) #define TRACKPAD_DIMENSION_Y (float)11000 #define TRACKPAD_MIN_Y -2456 #define TRACKPAD_MAX_Y 2565 #define TRACKPAD_RES_Y \ ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) /** * struct magicmouse_sc - Tracks Magic Mouse-specific data. * @input: Input device through which we report events. * @quirks: Currently unused. * @ntouches: Number of touches in most recent touch report. * @scroll_accel: Number of consecutive scroll motions. * @scroll_jiffies: Time of last scroll motion. * @touches: Most recent data for a touch, indexed by tracking ID. * @tracking_ids: Mapping of current touch input data to @touches. 
*/ struct magicmouse_sc { struct input_dev *input; unsigned long quirks; int ntouches; int scroll_accel; unsigned long scroll_jiffies; struct { short x; short y; short scroll_x; short scroll_y; u8 size; } touches[16]; int tracking_ids[16]; }; static int magicmouse_firm_touch(struct magicmouse_sc *msc) { int touch = -1; int ii; /* If there is only one "firm" touch, set touch to its * tracking ID. */ for (ii = 0; ii < msc->ntouches; ii++) { int idx = msc->tracking_ids[ii]; if (msc->touches[idx].size < 8) { /* Ignore this touch. */ } else if (touch >= 0) { touch = -1; break; } else { touch = idx; } } return touch; } static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) { int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 | test_bit(BTN_RIGHT, msc->input->key) << 1 | test_bit(BTN_MIDDLE, msc->input->key) << 2; if (emulate_3button) { int id; /* If some button was pressed before, keep it held * down. Otherwise, if there's exactly one firm * touch, use that to override the mouse's guess. */ if (state == 0) { /* The button was released. */ } else if (last_state != 0) { state = last_state; } else if ((id = magicmouse_firm_touch(msc)) >= 0) { int x = msc->touches[id].x; if (x < middle_button_start) state = 1; else if (x > middle_button_stop) state = 2; else state = 4; } /* else: we keep the mouse's guess */ input_report_key(msc->input, BTN_MIDDLE, state & 4); } input_report_key(msc->input, BTN_LEFT, state & 1); input_report_key(msc->input, BTN_RIGHT, state & 2); if (state != last_state) msc->scroll_accel = SCROLL_ACCEL_DEFAULT; } static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata) { struct input_dev *input = msc->input; int id, x, y, size, orientation, touch_major, touch_minor, state, down; if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf; x = (tdata[1] << 28 | tdata[0] << 20) >> 20; y = -((tdata[2] << 24 | tdata[1] << 16) >> 20); size = tdata[5] & 0x3f; orientation = (tdata[6] >> 2) - 32; touch_major = tdata[3]; touch_minor = tdata[4]; state = tdata[7] & TOUCH_STATE_MASK; down = state != TOUCH_STATE_NONE; } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf; x = (tdata[1] << 27 | tdata[0] << 19) >> 19; y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19); size = tdata[6] & 0x3f; orientation = (tdata[7] >> 2) - 32; touch_major = tdata[4]; touch_minor = tdata[5]; state = tdata[8] & TOUCH_STATE_MASK; down = state != TOUCH_STATE_NONE; } /* Store tracking ID and other fields. */ msc->tracking_ids[raw_id] = id; msc->touches[id].x = x; msc->touches[id].y = y; msc->touches[id].size = size; /* If requested, emulate a scroll wheel by detecting small * vertical touch motions. */ if (emulate_scroll_wheel) { unsigned long now = jiffies; int step_x = msc->touches[id].scroll_x - x; int step_y = msc->touches[id].scroll_y - y; /* Calculate and apply the scroll motion. */ switch (state) { case TOUCH_STATE_START: msc->touches[id].scroll_x = x; msc->touches[id].scroll_y = y; /* Reset acceleration after half a second. 
*/ if (scroll_acceleration && time_before(now, msc->scroll_jiffies + HZ / 2)) msc->scroll_accel = max_t(int, msc->scroll_accel - 1, 1); else msc->scroll_accel = SCROLL_ACCEL_DEFAULT; break; case TOUCH_STATE_DRAG: step_x /= (64 - (int)scroll_speed) * msc->scroll_accel; if (step_x != 0) { msc->touches[id].scroll_x -= step_x * (64 - scroll_speed) * msc->scroll_accel; msc->scroll_jiffies = now; input_report_rel(input, REL_HWHEEL, -step_x); } step_y /= (64 - (int)scroll_speed) * msc->scroll_accel; if (step_y != 0) { msc->touches[id].scroll_y -= step_y * (64 - scroll_speed) * msc->scroll_accel; msc->scroll_jiffies = now; input_report_rel(input, REL_WHEEL, step_y); } break; } } if (down) msc->ntouches++; input_mt_slot(input, id); input_mt_report_slot_state(input, MT_TOOL_FINGER, down); /* Generate the input events for this touch. */ if (down) { input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); input_report_abs(input, ABS_MT_ORIENTATION, -orientation); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); if (report_undeciphered) { if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) input_event(input, EV_MSC, MSC_RAW, tdata[7]); else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ input_event(input, EV_MSC, MSC_RAW, tdata[8]); } } } static int magicmouse_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct magicmouse_sc *msc = hid_get_drvdata(hdev); struct input_dev *input = msc->input; int x = 0, y = 0, ii, clicks = 0, npoints; switch (data[0]) { case TRACKPAD_REPORT_ID: /* Expect four bytes of prefix, and N*9 bytes of touch data. */ if (size < 4 || ((size - 4) % 9) != 0) return 0; npoints = (size - 4) / 9; if (npoints > 15) { hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", size); return 0; } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); clicks = data[1]; /* The following bits provide a device specific timestamp. They * are unused here. * * ts = data[1] >> 6 | data[2] << 2 | data[3] << 10; */ break; case MOUSE_REPORT_ID: /* Expect six bytes of prefix, and N*8 bytes of touch data. */ if (size < 6 || ((size - 6) % 8) != 0) return 0; npoints = (size - 6) / 8; if (npoints > 15) { hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", size); return 0; } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); /* When emulating three-button mode, it is important * to have the current touch information before * generating a click event. */ x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22; y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22; clicks = data[3]; /* The following bits provide a device specific timestamp. They * are unused here. * * ts = data[3] >> 6 | data[4] << 2 | data[5] << 10; */ break; case DOUBLE_REPORT_ID: /* Sometimes the trackpad sends two touch reports in one * packet. 
*/ magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); break; default: return 0; } if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { magicmouse_emit_buttons(msc, clicks & 3); input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ input_report_key(input, BTN_MOUSE, clicks & 1); input_mt_report_pointer_emulation(input, true); } input_sync(input); return 1; } static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) { int error; __set_bit(EV_KEY, input->evbit); if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { __set_bit(BTN_LEFT, input->keybit); __set_bit(BTN_RIGHT, input->keybit); if (emulate_3button) __set_bit(BTN_MIDDLE, input->keybit); __set_bit(EV_REL, input->evbit); __set_bit(REL_X, input->relbit); __set_bit(REL_Y, input->relbit); if (emulate_scroll_wheel) { __set_bit(REL_WHEEL, input->relbit); __set_bit(REL_HWHEEL, input->relbit); } } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ /* input->keybit is initialized with incorrect button info * for Magic Trackpad. There really is only one physical * button (BTN_LEFT == BTN_MOUSE). Make sure we don't * advertise buttons that don't exist... */ __clear_bit(BTN_RIGHT, input->keybit); __clear_bit(BTN_MIDDLE, input->keybit); __set_bit(BTN_MOUSE, input->keybit); __set_bit(BTN_TOOL_FINGER, input->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); __set_bit(BTN_TOOL_QUADTAP, input->keybit); __set_bit(BTN_TOOL_QUINTTAP, input->keybit); __set_bit(BTN_TOUCH, input->keybit); __set_bit(INPUT_PROP_POINTER, input->propbit); __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); } __set_bit(EV_ABS, input->evbit); error = input_mt_init_slots(input, 16, 0); if (error) return error; input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2, 4, 0); input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2, 4, 0); input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); /* Note: Touch Y position from the device is inverted relative * to how pointer motion is reported (and relative to how USB * HID recommends the coordinates work). This driver keeps * the origin at the same position, and just uses the additive * inverse of the reported Y. 
*/ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { input_set_abs_params(input, ABS_MT_POSITION_X, MOUSE_MIN_X, MOUSE_MAX_X, 4, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0); input_abs_set_res(input, ABS_MT_POSITION_X, MOUSE_RES_X); input_abs_set_res(input, ABS_MT_POSITION_Y, MOUSE_RES_Y); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); input_set_abs_params(input, ABS_MT_POSITION_X, TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); input_abs_set_res(input, ABS_X, TRACKPAD_RES_X); input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y); input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD_RES_X); input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD_RES_Y); } input_set_events_per_packet(input, 60); if (report_undeciphered) { __set_bit(EV_MSC, input->evbit); __set_bit(MSC_RAW, input->mscbit); } return 0; } static int magicmouse_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct magicmouse_sc *msc = hid_get_drvdata(hdev); if (!msc->input) msc->input = hi->input; /* Magic Trackpad does not give relative data after switching to MT */ if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD && field->flags & HID_MAIN_ITEM_RELATIVE) return -1; return 0; } static void magicmouse_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct magicmouse_sc *msc = hid_get_drvdata(hdev); int ret = magicmouse_setup_input(msc->input, hdev); if (ret) { hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); /* clean msc->input to notify probe() of the failure */ msc->input = NULL; } } static int magicmouse_probe(struct hid_device *hdev, const struct hid_device_id *id) { __u8 feature[] = { 0xd7, 0x01 }; struct magicmouse_sc *msc; struct hid_report *report; int ret; msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); if (msc == NULL) { hid_err(hdev, "can't alloc magicmouse descriptor\n"); return -ENOMEM; } msc->scroll_accel = SCROLL_ACCEL_DEFAULT; msc->quirks = id->driver_data; hid_set_drvdata(hdev, msc); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "magicmouse hid parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "magicmouse hw start failed\n"); return ret; } if (!msc->input) { hid_err(hdev, "magicmouse input not registered\n"); ret = -ENOMEM; goto err_stop_hw; } if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE_REPORT_ID); else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ report = hid_register_report(hdev, HID_INPUT_REPORT, TRACKPAD_REPORT_ID); report = hid_register_report(hdev, HID_INPUT_REPORT, DOUBLE_REPORT_ID); } if (!report) { hid_err(hdev, "unable to register touch report\n"); ret = -ENOMEM; goto err_stop_hw; } report->size = 6; /* * Some devices repond with 'invalid report id' when feature * report switching it into multitouch mode is sent to it. * * This results in -EIO from the _raw low-level transport callback, * but there seems to be no other way of switching the mode. * Thus the super-ugly hacky success check below. 
*/ ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret != -EIO && ret != sizeof(feature)) { hid_err(hdev, "unable to request touch data (%d)\n", ret); goto err_stop_hw; } return 0; err_stop_hw: hid_hw_stop(hdev); return ret; } static const struct hid_device_id magic_mice[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 }, { } }; MODULE_DEVICE_TABLE(hid, magic_mice); static struct hid_driver magicmouse_driver = { .name = "magicmouse", .id_table = magic_mice, .probe = magicmouse_probe, .raw_event = magicmouse_raw_event, .input_mapping = magicmouse_input_mapping, .input_configured = magicmouse_input_configured, }; module_hid_driver(magicmouse_driver); MODULE_LICENSE("GPL");
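/*
 * The length checks in magicmouse_raw_event() follow one pattern: a report is
 * a fixed prefix plus N fixed-size touch records, so N is recovered from the
 * total size and clamped to the backing array. A self-contained sketch of
 * that idiom (the helper name and shape are illustrative, not taken from
 * this driver):
 *
 *	static int touch_count(int size, int prefix, int rec, int max)
 *	{
 *		if (size < prefix || (size - prefix) % rec != 0)
 *			return -1;
 *		if ((size - prefix) / rec > max)
 *			return -1;
 *		return (size - prefix) / rec;
 *	}
 *
 * MOUSE_REPORT_ID corresponds to touch_count(size, 6, 8, 15) and
 * TRACKPAD_REPORT_ID to touch_count(size, 4, 9, 15).
 */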
/* ---- end of CrossVul sample: ./CrossVul/dataset_final_sorted/CWE-119/c/good_2128_0 ---- */
/* ---- CrossVul sample: crossvul-cpp_data_good_641_0 ---- */
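/*
 * The sample below is the patched MJ2 frame extractor from OpenJPEG: the
 * per-frame output name is built with a bounded snprintf() and the return
 * value is tested against sizeof(outfilename), so an over-long output prefix
 * is rejected instead of overflowing the 50-byte stack buffer.
 */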
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2003-2007, Francois-Olivier Devaux * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "openjpeg.h" #include "cio.h" #include "j2k.h" #include "jp2.h" #include "mj2.h" /* -------------------------------------------------------------------------- */ /** sample error callback expecting a FILE* client object */ void error_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[ERROR] %s", msg); } /** sample warning callback expecting a FILE* client object */ void warning_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[WARNING] %s", msg); } /** sample debug callback expecting a FILE* client object */ void info_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[INFO] %s", msg); } /* -------------------------------------------------------------------------- */ int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our 
callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ int num = snprintf(outfilename, sizeof(outfilename), "%s_%05d.j2k", argv[2], snum); if (num >= sizeof(outfilename)) { fprintf(stderr, "maximum length of output prefix exceeded\n"); return 1; } outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
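/*
 * The truncation test in main() leans on the C99 snprintf() contract: the
 * return value is the length the formatted string would have had, so
 * num >= sizeof(outfilename) means the name did not fit. A standalone sketch
 * of the idiom with the signedness handled explicitly (variable names are
 * illustrative):
 *
 *	char buf[50];
 *	int n = snprintf(buf, sizeof(buf), "%s_%05d.j2k", prefix, idx);
 *	if (n < 0 || (size_t)n >= sizeof(buf))
 *		return 1;
 *
 * The code above compares a plain int against size_t, which happens to be
 * safe because a negative snprintf() return converts to a huge unsigned
 * value and still trips the check, but testing n < 0 first is clearer.
 */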
/* ---- end of CrossVul sample: ./CrossVul/dataset_final_sorted/CWE-119/c/good_641_0 ---- */
/* ---- CrossVul sample: crossvul-cpp_data_bad_3508_0 ---- */
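/*
 * The final (truncated) sample is the unpatched half of a b43 DMA pair. The
 * excerpt that follows covers the 32-bit and 64-bit descriptor ops, ring
 * slot bookkeeping, and the streaming-DMA map/sync helpers; the RX
 * buffer-sizing path, where this driver's known CWE-119 issue is generally
 * reported, lies beyond the point where the text breaks off.
 */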
/* Broadcom B43 wireless driver DMA ringbuffer and descriptor allocation/management Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> Some code in this file is derived from the b44.c driver Copyright (C) 2002 David S. Miller Copyright (C) Pekka Pietikainen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "dma.h" #include "main.h" #include "debugfs.h" #include "xmit.h" #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <asm/div64.h> /* Required number of TX DMA slots per TX frame. * This currently is 2, because we put the header and the ieee80211 frame * into separate slots. */ #define TX_SLOTS_PER_FRAME 2 /* 32bit DMA ops. */ static struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, int slot, struct b43_dmadesc_meta **meta) { struct b43_dmadesc32 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43_dmadesc_generic *)desc; } static void op32_fill_descriptor(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43_dmadesc32 *descbase = ring->descbase; int slot; u32 ctl; u32 addr; u32 addrext; slot = (int)(&(desc->dma32) - descbase); B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addr |= ssb_dma_translation(ring->dev->dev); ctl = bufsize & B43_DMA32_DCTL_BYTECNT; if (slot == ring->nr_slots - 1) ctl |= B43_DMA32_DCTL_DTABLEEND; if (start) ctl |= B43_DMA32_DCTL_FRAMESTART; if (end) ctl |= B43_DMA32_DCTL_FRAMEEND; if (irq) ctl |= B43_DMA32_DCTL_IRQ; ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT) & B43_DMA32_DCTL_ADDREXT_MASK; desc->dma32.control = cpu_to_le32(ctl); desc->dma32.address = cpu_to_le32(addr); } static void op32_poke_tx(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA32_TXINDEX, (u32) (slot * sizeof(struct b43_dmadesc32))); } static void op32_tx_suspend(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) | B43_DMA32_TXSUSPEND); } static void op32_tx_resume(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) & ~B43_DMA32_TXSUSPEND); } static int op32_get_current_rxslot(struct b43_dmaring *ring) { u32 val; val = b43_dma_read(ring, B43_DMA32_RXSTATUS); val &= B43_DMA32_RXDPTR; return (val / sizeof(struct b43_dmadesc32)); } static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA32_RXINDEX, (u32) (slot * sizeof(struct b43_dmadesc32))); } static const struct b43_dma_ops dma32_ops = { .idx2desc = op32_idx2desc, .fill_descriptor = op32_fill_descriptor, .poke_tx = 
op32_poke_tx, .tx_suspend = op32_tx_suspend, .tx_resume = op32_tx_resume, .get_current_rxslot = op32_get_current_rxslot, .set_current_rxslot = op32_set_current_rxslot, }; /* 64bit DMA ops. */ static struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring, int slot, struct b43_dmadesc_meta **meta) { struct b43_dmadesc64 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43_dmadesc_generic *)desc; } static void op64_fill_descriptor(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43_dmadesc64 *descbase = ring->descbase; int slot; u32 ctl0 = 0, ctl1 = 0; u32 addrlo, addrhi; u32 addrext; slot = (int)(&(desc->dma64) - descbase); B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addrlo = (u32) (dmaaddr & 0xFFFFFFFF); addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addrhi |= (ssb_dma_translation(ring->dev->dev) << 1); if (slot == ring->nr_slots - 1) ctl0 |= B43_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= B43_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= B43_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= B43_DMA64_DCTL0_IRQ; ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & B43_DMA64_DCTL1_ADDREXT_MASK; desc->dma64.control0 = cpu_to_le32(ctl0); desc->dma64.control1 = cpu_to_le32(ctl1); desc->dma64.address_low = cpu_to_le32(addrlo); desc->dma64.address_high = cpu_to_le32(addrhi); } static void op64_poke_tx(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA64_TXINDEX, (u32) (slot * sizeof(struct b43_dmadesc64))); } static void op64_tx_suspend(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) | B43_DMA64_TXSUSPEND); } static void op64_tx_resume(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) & ~B43_DMA64_TXSUSPEND); } static int op64_get_current_rxslot(struct b43_dmaring *ring) { u32 val; val = b43_dma_read(ring, B43_DMA64_RXSTATUS); val &= B43_DMA64_RXSTATDPTR; return (val / sizeof(struct b43_dmadesc64)); } static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA64_RXINDEX, (u32) (slot * sizeof(struct b43_dmadesc64))); } static const struct b43_dma_ops dma64_ops = { .idx2desc = op64_idx2desc, .fill_descriptor = op64_fill_descriptor, .poke_tx = op64_poke_tx, .tx_suspend = op64_tx_suspend, .tx_resume = op64_tx_resume, .get_current_rxslot = op64_get_current_rxslot, .set_current_rxslot = op64_set_current_rxslot, }; static inline int free_slots(struct b43_dmaring *ring) { return (ring->nr_slots - ring->used_slots); } static inline int next_slot(struct b43_dmaring *ring, int slot) { B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); if (slot == ring->nr_slots - 1) return 0; return slot + 1; } static inline int prev_slot(struct b43_dmaring *ring, int slot) { B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); if (slot == 0) return ring->nr_slots - 1; return slot - 1; } #ifdef CONFIG_B43_DEBUG static void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) { if (current_used_slots <= ring->max_used_slots) return; ring->max_used_slots = current_used_slots; if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) { b43dbg(ring->dev->wl, "max_used_slots increased to %d on %s ring %d\n", ring->max_used_slots, ring->tx ? 
"TX" : "RX", ring->index); } } #else static inline void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) { } #endif /* DEBUG */ /* Request a slot for usage. */ static inline int request_slot(struct b43_dmaring *ring) { int slot; B43_WARN_ON(!ring->tx); B43_WARN_ON(ring->stopped); B43_WARN_ON(free_slots(ring) == 0); slot = next_slot(ring, ring->current_slot); ring->current_slot = slot; ring->used_slots++; update_max_used_slots(ring, ring->used_slots); return slot; } static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) { static const u16 map64[] = { B43_MMIO_DMA64_BASE0, B43_MMIO_DMA64_BASE1, B43_MMIO_DMA64_BASE2, B43_MMIO_DMA64_BASE3, B43_MMIO_DMA64_BASE4, B43_MMIO_DMA64_BASE5, }; static const u16 map32[] = { B43_MMIO_DMA32_BASE0, B43_MMIO_DMA32_BASE1, B43_MMIO_DMA32_BASE2, B43_MMIO_DMA32_BASE3, B43_MMIO_DMA32_BASE4, B43_MMIO_DMA32_BASE5, }; if (type == B43_DMA_64BIT) { B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map64))); return map64[controller_idx]; } B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map32))); return map32[controller_idx]; } static inline dma_addr_t map_descbuffer(struct b43_dmaring *ring, unsigned char *buf, size_t len, int tx) { dma_addr_t dmaaddr; if (tx) { dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_TO_DEVICE); } else { dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_FROM_DEVICE); } return dmaaddr; } static inline void unmap_descbuffer(struct b43_dmaring *ring, dma_addr_t addr, size_t len, int tx) { if (tx) { dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_TO_DEVICE); } else { dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } } static inline void sync_descbuffer_for_cpu(struct b43_dmaring *ring, dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); dma_sync_single_for_cpu(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void sync_descbuffer_for_device(struct b43_dmaring *ring, dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); dma_sync_single_for_device(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void free_descriptor_buffer(struct b43_dmaring *ring, struct b43_dmadesc_meta *meta) { if (meta->skb) { dev_kfree_skb_any(meta->skb); meta->skb = NULL; } } static int alloc_ringmemory(struct b43_dmaring *ring) { gfp_t flags = GFP_KERNEL; /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing * has shown that 4K is sufficient for the latter as long as the buffer * does not cross an 8K boundary. * * For unknown reasons - possibly a hardware error - the BCM4311 rev * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, * which accounts for the GFP_DMA flag below. * * The flags here must match the flags in free_ringmemory below! 
*/ if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, &(ring->dmabase), flags); if (!ring->descbase) { b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); return -ENOMEM; } memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); return 0; } static void free_ringmemory(struct b43_dmaring *ring) { dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_RXSTAT; if (value == B43_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_RXSTATE; if (value == B43_DMA32_RXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA RX reset timed out\n"); return -ENODEV; } return 0; } /* Reset the TX DMA channel */ static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED || value == B43_DMA64_TXSTAT_IDLEWAIT || value == B43_DMA64_TXSTAT_STOPPED) break; } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED || value == B43_DMA32_TXSTAT_IDLEWAIT || value == B43_DMA32_TXSTAT_STOPPED) break; } msleep(1); } offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA TX reset timed out\n"); return -ENODEV; } /* ensure the reset is completed. */ msleep(1); return 0; } /* Check if a DMA mapping address is invalid. */ static bool b43_dma_mapping_error(struct b43_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { case B43_DMA_30BIT: if ((u64)addr + buffersize > (1ULL << 30)) goto address_error; break; case B43_DMA_32BIT: if ((u64)addr + buffersize > (1ULL << 32)) goto address_error; break; case B43_DMA_64BIT: /* Currently we can't have addresses beyond * 64bit in the kernel. */ break; } /* The address is OK. */ return 0; address_error: /* We can't support this address. Unmap it again. 
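 *
 * (Worked example for the 30-bit case: a buffer mapped at 0x3FFFF000 with
 * buffersize 0x2000 is rejected, because 0x3FFFF000 + 0x2000 = 0x40001000
 * exceeds 1 << 30 = 0x40000000; the engine could silently wrap such an
 * address, so the caller falls back to a GFP_DMA allocation instead.)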
*/ unmap_descbuffer(ring, addr, buffersize, dma_to_device); return 1; } static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) { unsigned char *f = skb->data + ring->frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); } static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) { struct b43_rxhdr_fw4 *rxhdr; unsigned char *frame; /* This poisons the RX buffer to detect DMA failures. */ rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); rxhdr->frame_len = 0; B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); frame = skb->data + ring->frameoffset; memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); } static int setup_rx_descbuffer(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, struct b43_dmadesc_meta *meta, gfp_t gfp_flags) { dma_addr_t dmaaddr; struct sk_buff *skb; B43_WARN_ON(ring->tx); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { /* ugh. try to realloc in zone_dma */ gfp_flags |= GFP_DMA; dev_kfree_skb_any(skb); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { b43err(ring->dev->wl, "RX DMA buffer allocation failed\n"); dev_kfree_skb_any(skb); return -EIO; } } meta->skb = skb; meta->dmaaddr = dmaaddr; ring->ops->fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); return 0; } /* Allocate the initial descbuffers. * This is used for an RX ring only. */ static int alloc_initial_descbuffers(struct b43_dmaring *ring) { int i, err = -ENOMEM; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); if (err) { b43err(ring->dev->wl, "Failed to allocate initial descbuffers\n"); goto err_unwind; } } mb(); ring->used_slots = ring->nr_slots; err = 0; out: return err; err_unwind: for (i--; i >= 0; i--) { desc = ring->ops->idx2desc(ring, i, &meta); unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); dev_kfree_skb(meta->skb); } goto out; } /* Do initial setup of the DMA controller. * Reset the controller, write the ring busaddress * and switch the "enable" bit on. 
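 *
 * A minimal sketch of the 64-bit base-address programming done below
 * (helper name and factoring are hypothetical, not part of the driver):
 */

/* Illustrative only: split a 64-bit ring base the way dmacontroller_setup()
 * programs it -- low word, address-extension bits, and high word with the
 * backplane translation ORed in. */
static inline void example_split_ringbase64(u64 ringbase, u32 trans,
					    u32 *lo, u32 *hi, u32 *addrext)
{
	*lo = (u32)(ringbase & 0xFFFFFFFF);
	*addrext = (u32)(((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			 >> SSB_DMA_TRANSLATION_SHIFT);
	*hi = (u32)(((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
		    | (trans << 1));
}

/* dmacontroller_setup() itself: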
*/ static int dmacontroller_setup(struct b43_dmaring *ring) { int err = 0; u32 value; u32 addrext; u32 trans = ssb_dma_translation(ring->dev->dev); if (ring->tx) { if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA64_TXENABLE; value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) & B43_DMA64_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_TXCTL, value); b43_dma_write(ring, B43_DMA64_TXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_TXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA32_TXENABLE; value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) & B43_DMA32_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA32_TXCTL, value); b43_dma_write(ring, B43_DMA32_TXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); } } else { err = alloc_initial_descbuffers(ring); if (err) goto out; if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); value |= B43_DMA64_RXENABLE; value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) & B43_DMA64_RXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_RXCTL, value); b43_dma_write(ring, B43_DMA64_RXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_RXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc64)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); value |= B43_DMA32_RXENABLE; value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) & B43_DMA32_RXADDREXT_MASK; b43_dma_write(ring, B43_DMA32_RXCTL, value); b43_dma_write(ring, B43_DMA32_RXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc32)); } } out: return err; } /* Shutdown the DMA controller. 
*/ static void dmacontroller_cleanup(struct b43_dmaring *ring) { if (ring->tx) { b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_TXRING, 0); } else { b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_RXRING, 0); } } static void free_all_descbuffers(struct b43_dmaring *ring) { struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; int i; if (!ring->used_slots) return; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) { B43_WARN_ON(!ring->tx); continue; } if (ring->tx) { unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); } else { unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); } free_descriptor_buffer(ring, meta); } } static u64 supported_dma_mask(struct b43_wldev *dev) { u32 tmp; u16 mmio_base; tmp = b43_read32(dev, SSB_TMSHIGH); if (tmp & SSB_TMSHIGH_DMA64) return DMA_BIT_MASK(64); mmio_base = b43_dmacontroller_base(0, 0); b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); if (tmp & B43_DMA32_TXADDREXT_MASK) return DMA_BIT_MASK(32); return DMA_BIT_MASK(30); } static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask) { if (dmamask == DMA_BIT_MASK(30)) return B43_DMA_30BIT; if (dmamask == DMA_BIT_MASK(32)) return B43_DMA_32BIT; if (dmamask == DMA_BIT_MASK(64)) return B43_DMA_64BIT; B43_WARN_ON(1); return B43_DMA_30BIT; } /* Main initialization function. 
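 *
 * (Background on the probe above: supported_dma_mask() first checks the
 * SSB_TMSHIGH core flag for 64-bit capability, then writes the ADDREXT
 * mask into a 32-bit TX control register and reads it back -- if the bits
 * stick the engine supports 32-bit addressing, otherwise only 30-bit.)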
*/ static struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, int controller_index, int for_tx, enum b43_dmatype type) { struct b43_dmaring *ring; int i, err; dma_addr_t dma_test; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out; ring->nr_slots = B43_RXRING_SLOTS; if (for_tx) ring->nr_slots = B43_TXRING_SLOTS; ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; for (i = 0; i < ring->nr_slots; i++) ring->meta->skb = B43_DMA_PTR_POISON; ring->type = type; ring->dev = dev; ring->mmio_base = b43_dmacontroller_base(type, controller_index); ring->index = controller_index; if (type == B43_DMA_64BIT) ring->ops = &dma64_ops; else ring->ops = &dma32_ops; if (for_tx) { ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; } else B43_WARN_ON(1); } #ifdef CONFIG_B43_DEBUG ring->last_injected_overflow = jiffies; #endif if (for_tx) { /* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */ BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL); if (!ring->txhdr_cache) goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { /* ugh realloc */ kfree(ring->txhdr_cache); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL | GFP_DMA); if (!ring->txhdr_cache) goto err_kfree_meta; dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { b43err(dev->wl, "TXHDR DMA allocation failed\n"); goto err_kfree_txhdr_cache; } } dma_unmap_single(dev->dev->dma_dev, dma_test, b43_txhdr_size(dev), DMA_TO_DEVICE); } err = alloc_ringmemory(ring); if (err) goto err_kfree_txhdr_cache; err = dmacontroller_setup(ring); if (err) goto err_free_ringmemory; out: return ring; err_free_ringmemory: free_ringmemory(ring); err_kfree_txhdr_cache: kfree(ring->txhdr_cache); err_kfree_meta: kfree(ring->meta); err_kfree_ring: kfree(ring); ring = NULL; goto out; } #define divide(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ __a; \ }) #define modulo(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ }) /* Main cleanup function. */ static void b43_destroy_dmaring(struct b43_dmaring *ring, const char *ringname) { if (!ring) return; #ifdef CONFIG_B43_DEBUG { /* Print some statistics. 
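 *
 * (The divide()/modulo() macros above wrap do_div(), which divides a u64
 * in place and returns the 32-bit remainder: divide() evaluates to the
 * quotient left in __a, modulo() to do_div()'s return value. So for
 * permille_failed = 123, divide(123, 10) is 12 and modulo(123, 10) is 3,
 * printed below as "12.3%".)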
*/ u64 failed_packets = ring->nr_failed_tx_packets; u64 succeed_packets = ring->nr_succeed_tx_packets; u64 nr_packets = failed_packets + succeed_packets; u64 permille_failed = 0, average_tries = 0; if (nr_packets) permille_failed = divide(failed_packets * 1000, nr_packets); if (nr_packets) average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets); b43dbg(ring->dev->wl, "DMA-%u %s: " "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, " "Average tries %llu.%02llu\n", (unsigned int)(ring->type), ringname, ring->max_used_slots, ring->nr_slots, (unsigned long long)failed_packets, (unsigned long long)nr_packets, (unsigned long long)divide(permille_failed, 10), (unsigned long long)modulo(permille_failed, 10), (unsigned long long)divide(average_tries, 100), (unsigned long long)modulo(average_tries, 100)); } #endif /* DEBUG */ /* Device IRQs are disabled prior entering this function, * so no need to take care of concurrency with rx handler stuff. */ dmacontroller_cleanup(ring); free_all_descbuffers(ring); free_ringmemory(ring); kfree(ring->txhdr_cache); kfree(ring->meta); kfree(ring); } #define destroy_ring(dma, ring) do { \ b43_destroy_dmaring((dma)->ring, __stringify(ring)); \ (dma)->ring = NULL; \ } while (0) void b43_dma_free(struct b43_wldev *dev) { struct b43_dma *dma; if (b43_using_pio_transfers(dev)) return; dma = &dev->dma; destroy_ring(dma, rx_ring); destroy_ring(dma, tx_ring_AC_BK); destroy_ring(dma, tx_ring_AC_BE); destroy_ring(dma, tx_ring_AC_VI); destroy_ring(dma, tx_ring_AC_VO); destroy_ring(dma, tx_ring_mcast); } static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) { u64 orig_mask = mask; bool fallback = 0; int err; /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. */ while (1) { err = dma_set_mask(dev->dev->dma_dev, mask); if (!err) { err = dma_set_coherent_mask(dev->dev->dma_dev, mask); if (!err) break; } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); fallback = 1; continue; } b43err(dev->wl, "The machine/kernel does not support " "the required %u-bit DMA mask\n", (unsigned int)dma_mask_to_engine_type(orig_mask)); return -EOPNOTSUPP; } if (fallback) { b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n", (unsigned int)dma_mask_to_engine_type(orig_mask), (unsigned int)dma_mask_to_engine_type(mask)); } return 0; } int b43_dma_init(struct b43_wldev *dev) { struct b43_dma *dma = &dev->dma; int err; u64 dmamask; enum b43_dmatype type; dmamask = supported_dma_mask(dev); type = dma_mask_to_engine_type(dmamask); err = b43_dma_set_mask(dev, dmamask); if (err) return err; err = -ENOMEM; /* setup TX DMA channels. */ dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type); if (!dma->tx_ring_AC_BK) goto out; dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type); if (!dma->tx_ring_AC_BE) goto err_destroy_bk; dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type); if (!dma->tx_ring_AC_VI) goto err_destroy_be; dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type); if (!dma->tx_ring_AC_VO) goto err_destroy_vi; dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type); if (!dma->tx_ring_mcast) goto err_destroy_vo; /* setup RX DMA channel. */ dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type); if (!dma->rx_ring) goto err_destroy_mcast; /* No support for the TX status DMA ring. 
*/ B43_WARN_ON(dev->dev->id.revision < 5); b43dbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); err = 0; out: return err; err_destroy_mcast: destroy_ring(dma, tx_ring_mcast); err_destroy_vo: destroy_ring(dma, tx_ring_AC_VO); err_destroy_vi: destroy_ring(dma, tx_ring_AC_VI); err_destroy_be: destroy_ring(dma, tx_ring_AC_BE); err_destroy_bk: destroy_ring(dma, tx_ring_AC_BK); return err; } /* Generate a cookie for the TX header. */ static u16 generate_cookie(struct b43_dmaring *ring, int slot) { u16 cookie; /* Use the upper 4 bits of the cookie as * DMA controller ID and store the slot number * in the lower 12 bits. * Note that the cookie must never be 0, as this * is a special value used in RX path. * It can also not be 0xFFFF because that is special * for multicast frames. */ cookie = (((u16)ring->index + 1) << 12); B43_WARN_ON(slot & ~0x0FFF); cookie |= (u16)slot; return cookie; } /* Inspect a cookie and find out to which controller/slot it belongs. */ static struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) { struct b43_dma *dma = &dev->dma; struct b43_dmaring *ring = NULL; switch (cookie & 0xF000) { case 0x1000: ring = dma->tx_ring_AC_BK; break; case 0x2000: ring = dma->tx_ring_AC_BE; break; case 0x3000: ring = dma->tx_ring_AC_VI; break; case 0x4000: ring = dma->tx_ring_AC_VO; break; case 0x5000: ring = dma->tx_ring_mcast; break; } *slot = (cookie & 0x0FFF); if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) { b43dbg(dev->wl, "TX-status contains " "invalid cookie: 0x%04X\n", cookie); return NULL; } return ring; } static int dma_tx_fragment(struct b43_dmaring *ring, struct sk_buff *skb) { const struct b43_dma_ops *ops = ring->ops; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info); u8 *header; int slot, old_top_slot, old_used_slots; int err; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_dmadesc_meta *meta_hdr; u16 cookie; size_t hdrsize = b43_txhdr_size(ring->dev); /* Important note: If the number of used DMA slots per TX frame * is changed here, the TX_SLOTS_PER_FRAME definition at the top of * the file has to be updated, too! */ old_top_slot = ring->current_slot; old_used_slots = ring->used_slots; /* Get a slot for the header. */ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta_hdr); memset(meta_hdr, 0, sizeof(*meta_hdr)); header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]); cookie = generate_cookie(ring, slot); err = b43_generate_txhdr(ring->dev, header, skb, info, cookie); if (unlikely(err)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return err; } meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, hdrsize, 1); if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return -EIO; } ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, hdrsize, 1, 0, 0); /* Get a slot for the payload. */ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta); memset(meta, 0, sizeof(*meta)); meta->skb = skb; meta->is_last_fragment = 1; priv_info->bouncebuffer = NULL; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. 
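 *
 * (The pattern: if skb->data happens to map outside the engine's reachable
 * address range, the frame is copied into a freshly kmalloc'd GFP_DMA
 * buffer -- guaranteed to sit in low memory -- and the DMA is done from
 * the copy. The copy is kfree'd in the TX-status path once the descriptor
 * is reclaimed.)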
*/ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { priv_info->bouncebuffer = kmemdup(skb->data, skb->len, GFP_ATOMIC | GFP_DMA); if (!priv_info->bouncebuffer) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -ENOMEM; goto out_unmap_hdr; } meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -EIO; goto out_unmap_hdr; } } ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* Tell the firmware about the cookie of the last * mcast frame, so it can clear the more-data bit in it. */ b43_shm_write16(ring->dev, B43_SHM_SHARED, B43_SHM_SH_MCASTCOOKIE, cookie); } /* Now transfer the whole frame. */ wmb(); ops->poke_tx(ring, next_slot(ring, slot)); return 0; out_unmap_hdr: unmap_descbuffer(ring, meta_hdr->dmaaddr, hdrsize, 1); return err; } static inline int should_inject_overflow(struct b43_dmaring *ring) { #ifdef CONFIG_B43_DEBUG if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) { /* Check if we should inject another ringbuffer overflow * to test handling of this situation in the stack. */ unsigned long next_overflow; next_overflow = ring->last_injected_overflow + HZ; if (time_after(jiffies, next_overflow)) { ring->last_injected_overflow = jiffies; b43dbg(ring->dev->wl, "Injecting TX ring overflow on " "DMA controller %d\n", ring->index); return 1; } } #endif /* CONFIG_B43_DEBUG */ return 0; } /* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev, u8 queue_prio) { struct b43_dmaring *ring; if (dev->qos_enabled) { /* 0 = highest priority */ switch (queue_prio) { default: B43_WARN_ON(1); /* fallthrough */ case 0: ring = dev->dma.tx_ring_AC_VO; break; case 1: ring = dev->dma.tx_ring_AC_VI; break; case 2: ring = dev->dma.tx_ring_AC_BE; break; case 3: ring = dev->dma.tx_ring_AC_BK; break; } } else ring = dev->dma.tx_ring_AC_BE; return ring; } int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) { struct b43_dmaring *ring; struct ieee80211_hdr *hdr; int err = 0; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); hdr = (struct ieee80211_hdr *)skb->data; if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* The multicast ring will be sent after the DTIM */ ring = dev->dma.tx_ring_mcast; /* Set the more-data bit. Ucode will clear it on * the last frame for us. */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } else { /* Decide by priority where to put this frame. */ ring = select_ring_by_priority( dev, skb_get_queue_mapping(skb)); } B43_WARN_ON(!ring->tx); if (unlikely(ring->stopped)) { /* We get here only because of a bug in mac80211. * Because of a race, one packet may be queued after * the queue is stopped, thus we got called when we shouldn't. * For now, just refuse the transmit. */ if (b43_debug(dev, B43_DBG_DMAVERBOSE)) b43err(dev->wl, "Packet after queue stopped\n"); err = -ENOSPC; goto out; } if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43err(dev->wl, "DMA queue overflow\n"); err = -ENOSPC; goto out; } /* Assign the queue number to the ring (if not already done before) * so TX status handling can use it. 
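 * (With QoS enabled, mac80211 queue 0, the highest priority, is served by
 * the AC_VO ring and queue 3 by AC_BK, per select_ring_by_priority() above;
 * without QoS every frame uses AC_BE.)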
The queue to ring mapping is * static, so we don't need to store it per frame. */ ring->queue_prio = skb_get_queue_mapping(skb); err = dma_tx_fragment(ring, skb); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ dev_kfree_skb_any(skb); err = 0; goto out; } if (unlikely(err)) { b43err(dev->wl, "DMA tx mapping failure\n"); goto out; } if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || should_inject_overflow(ring)) { /* This TX ring is full. */ ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); ring->stopped = 1; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); } } out: return err; } void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status) { const struct b43_dma_ops *ops; struct b43_dmaring *ring; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; int slot, firstused; bool frame_succeed; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return; B43_WARN_ON(!ring->tx); /* Sanity check: TX packets are processed in-order on one ring. * Check if the slot deduced from the cookie really is the first * used slot. */ firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in * malfunction, memory leaks and/or stall of DMA functionality. */ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " "Expected %d, but got %d\n", ring->index, firstused, slot); return; } ops = ring->ops; while (1) { B43_WARN_ON(slot < 0 || slot >= ring->nr_slots); desc = ops->idx2desc(ring, slot, &meta); if (b43_dma_ptr_is_poisoned(meta->skb)) { b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) " "on ring %d\n", slot, firstused, ring->index); break; } if (meta->skb) { struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { unmap_descbuffer(ring, meta->dmaaddr, b43_txhdr_size(dev), 1); } if (meta->is_last_fragment) { struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { /* This is a scatter-gather fragment of a frame, so * the skb pointer must not be NULL. */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); break; } info = IEEE80211_SKB_CB(meta->skb); /* * Call back to inform the ieee80211 subsystem about * the status of the transmission. */ frame_succeed = b43_fill_txstatus_report(dev, info, status); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; else ring->nr_failed_tx_packets++; ring->nr_total_packet_tries += status->frame_count; #endif /* DEBUG */ ieee80211_tx_status(dev->wl->hw, meta->skb); /* skb will be freed by ieee80211_tx_status(). * Poison our pointer. */ meta->skb = B43_DMA_PTR_POISON; } else { /* No need to call free_descriptor_buffer here, as * this is only the txhdr, which is not allocated. */ if (unlikely(meta->skb)) { b43dbg(dev->wl, "TX status unexpected non-NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); break; } } /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; if (meta->is_last_fragment) { /* This is the last scatter-gather * fragment of the frame. We are done. 
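 * (Each frame occupies TX_SLOTS_PER_FRAME consecutive slots -- the txhdr
 * slot first, then the payload slot flagged is_last_fragment -- so one
 * status report walks and reclaims exactly that pair before breaking out.)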
*/ break; } slot = next_slot(ring, slot); } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); ring->stopped = 0; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } } } static void dma_rx(struct b43_dmaring *ring, int *slot) { const struct b43_dma_ops *ops = ring->ops; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_rxhdr_fw4 *rxhdr; struct sk_buff *skb; u16 len; int err; dma_addr_t dmaaddr; desc = ops->idx2desc(ring, *slot, &meta); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); skb = meta->skb; rxhdr = (struct b43_rxhdr_fw4 *)skb->data; len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0; do { udelay(2); barrier(); len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } } if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { /* Something went wrong with the DMA. * The device did not touch the buffer and did not overwrite the poison. */ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } if (unlikely(len > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet. */ int cnt = 0; s32 tmp = len; while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. */ b43_poison_rx_buffer(ring, meta->skb); sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); cnt++; tmp -= ring->rx_buffersize; if (tmp <= 0) break; } b43err(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n", len, ring->rx_buffersize, cnt); goto drop; } dmaaddr = meta->dmaaddr; err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); goto drop_recycle_buffer; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); skb_put(skb, len + ring->frameoffset); skb_pull(skb, ring->frameoffset); b43_rx(ring->dev, skb, rxhdr); drop: return; drop_recycle_buffer: /* Poison and recycle the RX buffer. 
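 *
 * (The poison is eight 0xFF bytes written at ring->frameoffset before the
 * buffer is handed to the device; b43_rx_buffer_is_poisoned() ANDs those
 * bytes together and compares against 0xFF, so any byte the hardware
 * overwrote marks the buffer as genuinely received.)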
*/ b43_poison_rx_buffer(ring, skb); sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; int slot, current_slot; int used_slots = 0; B43_WARN_ON(ring->tx); current_slot = ops->get_current_rxslot(ring); B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); slot = ring->current_slot; for (; slot != current_slot; slot = next_slot(ring, slot)) { dma_rx(ring, &slot); update_max_used_slots(ring, ++used_slots); } ops->set_current_rxslot(ring, slot); ring->current_slot = slot; } static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) { B43_WARN_ON(!ring->tx); ring->ops->tx_suspend(ring); } static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) { B43_WARN_ON(!ring->tx); ring->ops->tx_resume(ring); } void b43_dma_tx_suspend(struct b43_wldev *dev) { b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO); b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast); } void b43_dma_tx_resume(struct b43_wldev *dev) { b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK); b43_power_saving_ctl_bits(dev, 0); } static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, u16 mmio_base, bool enable) { u32 ctl; if (type == B43_DMA_64BIT) { ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL); ctl &= ~B43_DMA64_RXDIRECTFIFO; if (enable) ctl |= B43_DMA64_RXDIRECTFIFO; b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl); } else { ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL); ctl &= ~B43_DMA32_RXDIRECTFIFO; if (enable) ctl |= B43_DMA32_RXDIRECTFIFO; b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl); } } /* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine. * This is called from PIO code, so DMA structures are not available. */ void b43_dma_direct_fifo_rx(struct b43_wldev *dev, unsigned int engine_index, bool enable) { enum b43_dmatype type; u16 mmio_base; type = dma_mask_to_engine_type(supported_dma_mask(dev)); mmio_base = b43_dmacontroller_base(type, engine_index); direct_fifo_rx(dev, type, mmio_base, enable); }
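
/* Illustrative sketch (hypothetical helper, not part of the driver): the TX
 * cookie built by generate_cookie() packs the 1-based ring number into the
 * top 4 bits and the slot into the low 12, so decoding is a shift and a
 * mask, mirroring parse_cookie() above: */
static inline void example_cookie_decode(u16 cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 1;	/* back to the 0-based controller index */
	*slot = cookie & 0x0FFF;
}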
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3508_0
crossvul-cpp_data_good_4735_0
/* * NetLabel CIPSO/IPv4 Support * * This file defines the CIPSO/IPv4 functions for the NetLabel system. The * NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO. * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/types.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/audit.h> #include <net/sock.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include "netlabel_user.h" #include "netlabel_cipso_v4.h" /* Argument struct for cipso_v4_doi_walk() */ struct netlbl_cipsov4_doiwalk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; /* NetLabel Generic NETLINK CIPSOv4 family */ static struct genl_family netlbl_cipsov4_gnl_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = NETLBL_NLTYPE_CIPSOV4_NAME, .version = NETLBL_PROTO_VERSION, .maxattr = NLBL_CIPSOV4_A_MAX, }; /* NetLabel Netlink attribute policy */ static struct nla_policy netlbl_cipsov4_genl_policy[NLBL_CIPSOV4_A_MAX + 1] = { [NLBL_CIPSOV4_A_DOI] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MTYPE] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_TAG] = { .type = NLA_U8 }, [NLBL_CIPSOV4_A_TAGLST] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSLVLLOC] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSLVLREM] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSLVL] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSLVLLST] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSCATLOC] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSCATREM] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSCAT] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSCATLST] = { .type = NLA_NESTED }, }; /* * Helper Functions */ /** * netlbl_cipsov4_doi_free - Frees a CIPSO V4 DOI definition * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void netlbl_cipsov4_doi_free(struct rcu_head *entry) { struct cipso_v4_doi *ptr; ptr = container_of(entry, struct cipso_v4_doi, rcu); switch (ptr->type) { case CIPSO_V4_MAP_STD: kfree(ptr->map.std->lvl.cipso); kfree(ptr->map.std->lvl.local); kfree(ptr->map.std->cat.cipso); kfree(ptr->map.std->cat.local); break; } kfree(ptr); } /** * netlbl_cipsov4_add_common - Parse the common sections of an ADD message * @info: the Generic NETLINK info block * @doi_def: the CIPSO V4 DOI definition * * Description: * Parse the common sections of an ADD message and fill in the related values * in @doi_def. Returns zero on success, negative values on failure. 
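 *
 * (The tag list is capped at CIPSO_V4_TAG_MAXCNT entries and any unused
 * trailing slots are padded with CIPSO_V4_TAG_INVALID, which is also the
 * end-of-list sentinel the LIST handler below relies on.)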
* */ static int netlbl_cipsov4_add_common(struct genl_info *info, struct cipso_v4_doi *doi_def) { struct nlattr *nla; int nla_rem; u32 iter = 0; doi_def->doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_TAGLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) return -EINVAL; nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) if (nla->nla_type == NLBL_CIPSOV4_A_TAG) { if (iter >= CIPSO_V4_TAG_MAXCNT) return -EINVAL; doi_def->tags[iter++] = nla_get_u8(nla); } while (iter < CIPSO_V4_TAG_MAXCNT) doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; return 0; } /* * NetLabel Command Handlers */ /** * netlbl_cipsov4_add_std - Adds a CIPSO V4 DOI definition * @info: the Generic NETLINK info block * * Description: * Create a new CIPSO_V4_MAP_STD DOI definition based on the given ADD message * and add it to the CIPSO V4 engine. Return zero on success and non-zero on * error. * */ static int netlbl_cipsov4_add_std(struct genl_info *info) { int ret_val = -EINVAL; struct cipso_v4_doi *doi_def = NULL; struct nlattr *nla_a; struct nlattr *nla_b; int nla_a_rem; int nla_b_rem; u32 iter; if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] || !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST]) return -EINVAL; if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); if (doi_def == NULL) return -ENOMEM; doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL); if (doi_def->map.std == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->type = CIPSO_V4_MAP_STD; ret_val = netlbl_cipsov4_add_common(info, doi_def); if (ret_val != 0) goto add_std_failure; ret_val = -EINVAL; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { if (nla_validate_nested(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_b->nla_type) { case NLBL_CIPSOV4_A_MLSLVLLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_LVLS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->lvl.local_size) doi_def->map.std->lvl.local_size = nla_get_u32(nla_b) + 1; break; case NLBL_CIPSOV4_A_MLSLVLREM: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_REM_LVLS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->lvl.cipso_size) doi_def->map.std->lvl.cipso_size = nla_get_u32(nla_b) + 1; break; } } doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, sizeof(u32), GFP_KERNEL); if (doi_def->map.std->lvl.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size, sizeof(u32), GFP_KERNEL); if (doi_def->map.std->lvl.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; } for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { struct nlattr *lvl_loc; struct nlattr *lvl_rem; lvl_loc = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSLVLLOC); lvl_rem = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSLVLREM); if (lvl_loc == NULL || lvl_rem == NULL) goto add_std_failure; doi_def->map.std->lvl.local[nla_get_u32(lvl_loc)] = 
nla_get_u32(lvl_rem); doi_def->map.std->lvl.cipso[nla_get_u32(lvl_rem)] = nla_get_u32(lvl_loc); } if (info->attrs[NLBL_CIPSOV4_A_MLSCATLST]) { if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_MLSCATLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) goto add_std_failure; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSCAT) { if (nla_validate_nested(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_b->nla_type) { case NLBL_CIPSOV4_A_MLSCATLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_CATS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->cat.local_size) doi_def->map.std->cat.local_size = nla_get_u32(nla_b) + 1; break; case NLBL_CIPSOV4_A_MLSCATREM: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_REM_CATS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->cat.cipso_size) doi_def->map.std->cat.cipso_size = nla_get_u32(nla_b) + 1; break; } } doi_def->map.std->cat.local = kcalloc( doi_def->map.std->cat.local_size, sizeof(u32), GFP_KERNEL); if (doi_def->map.std->cat.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->cat.cipso = kcalloc( doi_def->map.std->cat.cipso_size, sizeof(u32), GFP_KERNEL); if (doi_def->map.std->cat.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; } for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT; for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++) doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSCAT) { struct nlattr *cat_loc; struct nlattr *cat_rem; cat_loc = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSCATLOC); cat_rem = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSCATREM); if (cat_loc == NULL || cat_rem == NULL) goto add_std_failure; doi_def->map.std->cat.local[ nla_get_u32(cat_loc)] = nla_get_u32(cat_rem); doi_def->map.std->cat.cipso[ nla_get_u32(cat_rem)] = nla_get_u32(cat_loc); } } ret_val = cipso_v4_doi_add(doi_def); if (ret_val != 0) goto add_std_failure; return 0; add_std_failure: if (doi_def) netlbl_cipsov4_doi_free(&doi_def->rcu); return ret_val; } /** * netlbl_cipsov4_add_pass - Adds a CIPSO V4 DOI definition * @info: the Generic NETLINK info block * * Description: * Create a new CIPSO_V4_MAP_PASS DOI definition based on the given ADD message * and add it to the CIPSO V4 engine. Return zero on success and non-zero on * error. * */ static int netlbl_cipsov4_add_pass(struct genl_info *info) { int ret_val; struct cipso_v4_doi *doi_def = NULL; if (!info->attrs[NLBL_CIPSOV4_A_TAGLST]) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); if (doi_def == NULL) return -ENOMEM; doi_def->type = CIPSO_V4_MAP_PASS; ret_val = netlbl_cipsov4_add_common(info, doi_def); if (ret_val != 0) goto add_pass_failure; ret_val = cipso_v4_doi_add(doi_def); if (ret_val != 0) goto add_pass_failure; return 0; add_pass_failure: netlbl_cipsov4_doi_free(&doi_def->rcu); return ret_val; } /** * netlbl_cipsov4_add - Handle an ADD message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Create a new DOI definition based on the given ADD message and add it to the * CIPSO V4 engine. Returns zero on success, negative values on failure. 
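 *
 * (CIPSO_V4_MAP_STD carries explicit local<->CIPSO level and category
 * translation tables, sized in a first pass over the nested netlink
 * attributes and filled in a second; CIPSO_V4_MAP_PASS carries no tables
 * and passes labels through unmodified, which is why
 * netlbl_cipsov4_add_pass() only needs the common tag-list parsing.)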
* */ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info) { int ret_val = -EINVAL; u32 type; u32 doi; const char *type_str = "(unknown)"; struct audit_buffer *audit_buf; struct netlbl_audit audit_info; if (!info->attrs[NLBL_CIPSOV4_A_DOI] || !info->attrs[NLBL_CIPSOV4_A_MTYPE]) return -EINVAL; doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); netlbl_netlink_auditinfo(skb, &audit_info); type = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE]); switch (type) { case CIPSO_V4_MAP_STD: type_str = "std"; ret_val = netlbl_cipsov4_add_std(info); break; case CIPSO_V4_MAP_PASS: type_str = "pass"; ret_val = netlbl_cipsov4_add_pass(info); break; } audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD, &audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * netlbl_cipsov4_list - Handle a LIST message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated LIST message and respond accordingly. While the * response message generated by the kernel is straightforward, determining * before hand the size of the buffer to allocate is not (we have to generate * the message to know the size). In order to keep this function sane what we * do is allocate a buffer of NLMSG_GOODSIZE and try to fit the response in * that size, if we fail then we restart with a larger buffer and try again. * We continue in this manner until we hit a limit of failed attempts then we * give up and just send an error message. Returns zero on success and * negative values on error. * */ static int netlbl_cipsov4_list(struct sk_buff *skb, struct genl_info *info) { int ret_val; struct sk_buff *ans_skb = NULL; u32 nlsze_mult = 1; void *data; u32 doi; struct nlattr *nla_a; struct nlattr *nla_b; struct cipso_v4_doi *doi_def; u32 iter; if (!info->attrs[NLBL_CIPSOV4_A_DOI]) { ret_val = -EINVAL; goto list_failure; } list_start: ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE * nlsze_mult, GFP_KERNEL); if (ans_skb == NULL) { ret_val = -ENOMEM; goto list_failure; } data = genlmsg_put_reply(ans_skb, info, &netlbl_cipsov4_gnl_family, 0, NLBL_CIPSOV4_C_LIST); if (data == NULL) { ret_val = -ENOMEM; goto list_failure; } doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); rcu_read_lock(); doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) { ret_val = -EINVAL; goto list_failure; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MTYPE, doi_def->type); if (ret_val != 0) goto list_failure_lock; nla_a = nla_nest_start(ans_skb, NLBL_CIPSOV4_A_TAGLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_failure_lock; } for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID; iter++) { ret_val = nla_put_u8(ans_skb, NLBL_CIPSOV4_A_TAG, doi_def->tags[iter]); if (ret_val != 0) goto list_failure_lock; } nla_nest_end(ans_skb, nla_a); switch (doi_def->type) { case CIPSO_V4_MAP_STD: nla_a = nla_nest_start(ans_skb, NLBL_CIPSOV4_A_MLSLVLLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_failure_lock; } for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) { if (doi_def->map.std->lvl.local[iter] == CIPSO_V4_INV_LVL) continue; nla_b = nla_nest_start(ans_skb, NLBL_CIPSOV4_A_MLSLVL); if (nla_b == NULL) { ret_val = -ENOMEM; goto list_retry; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSLVLLOC, iter); if (ret_val != 0) goto list_retry; ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSLVLREM, 
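/* (grow-and-retry: nla_put failures in this per-entry loop jump to
 * list_retry, which frees the skb, bumps nlsze_mult, and rebuilds the
 * reply in a buffer of up to 4 * NLMSG_DEFAULT_SIZE) */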
doi_def->map.std->lvl.local[iter]); if (ret_val != 0) goto list_retry; nla_nest_end(ans_skb, nla_b); } nla_nest_end(ans_skb, nla_a); nla_a = nla_nest_start(ans_skb, NLBL_CIPSOV4_A_MLSCATLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_retry; } for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) { if (doi_def->map.std->cat.local[iter] == CIPSO_V4_INV_CAT) continue; nla_b = nla_nest_start(ans_skb, NLBL_CIPSOV4_A_MLSCAT); if (nla_b == NULL) { ret_val = -ENOMEM; goto list_retry; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSCATLOC, iter); if (ret_val != 0) goto list_retry; ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSCATREM, doi_def->map.std->cat.local[iter]); if (ret_val != 0) goto list_retry; nla_nest_end(ans_skb, nla_b); } nla_nest_end(ans_skb, nla_a); break; } rcu_read_unlock(); genlmsg_end(ans_skb, data); ret_val = genlmsg_reply(ans_skb, info); if (ret_val != 0) goto list_failure; return 0; list_retry: /* XXX - this limit is a guesstimate */ if (nlsze_mult < 4) { rcu_read_unlock(); kfree_skb(ans_skb); nlsze_mult++; goto list_start; } list_failure_lock: rcu_read_unlock(); list_failure: kfree_skb(ans_skb); return ret_val; } /** * netlbl_cipsov4_listall_cb - cipso_v4_doi_walk() callback for LISTALL * @doi_def: the CIPSOv4 DOI definition * @arg: the netlbl_cipsov4_doiwalk_arg structure * * Description: * This function is designed to be used as a callback to the * cipso_v4_doi_walk() function for use in generating a response for a LISTALL * message. Returns the size of the message on success, negative values on * failure. * */ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg) { int ret_val = -ENOMEM; struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg; void *data; data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, cb_arg->seq, &netlbl_cipsov4_gnl_family, NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL); if (data == NULL) goto listall_cb_failure; ret_val = nla_put_u32(cb_arg->skb, NLBL_CIPSOV4_A_DOI, doi_def->doi); if (ret_val != 0) goto listall_cb_failure; ret_val = nla_put_u32(cb_arg->skb, NLBL_CIPSOV4_A_MTYPE, doi_def->type); if (ret_val != 0) goto listall_cb_failure; return genlmsg_end(cb_arg->skb, data); listall_cb_failure: genlmsg_cancel(cb_arg->skb, data); return ret_val; } /** * netlbl_cipsov4_listall - Handle a LISTALL message * @skb: the NETLINK buffer * @cb: the NETLINK callback * * Description: * Process a user generated LISTALL message and respond accordingly. Returns * zero on success and negative values on error. * */ static int netlbl_cipsov4_listall(struct sk_buff *skb, struct netlink_callback *cb) { struct netlbl_cipsov4_doiwalk_arg cb_arg; int doi_skip = cb->args[0]; cb_arg.nl_cb = cb; cb_arg.skb = skb; cb_arg.seq = cb->nlh->nlmsg_seq; cipso_v4_doi_walk(&doi_skip, netlbl_cipsov4_listall_cb, &cb_arg); cb->args[0] = doi_skip; return skb->len; } /** * netlbl_cipsov4_remove - Handle a REMOVE message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated REMOVE message and respond accordingly. Returns * zero on success, negative values on failure. 
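 *
 * (Aside on the LISTALL dump just above: netlink dump callbacks are
 * re-entered until they stop producing data, so the walk position is
 * parked in cb->args[0] between invocations and handed back to
 * cipso_v4_doi_walk() as the skip count on the next pass.)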
* */ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) { int ret_val = -EINVAL; u32 doi = 0; struct audit_buffer *audit_buf; struct netlbl_audit audit_info; if (!info->attrs[NLBL_CIPSOV4_A_DOI]) return -EINVAL; doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); netlbl_netlink_auditinfo(skb, &audit_info); ret_val = cipso_v4_doi_remove(doi, &audit_info, netlbl_cipsov4_doi_free); audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL, &audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /* * NetLabel Generic NETLINK Command Definitions */ static struct genl_ops netlbl_cipsov4_genl_c_add = { .cmd = NLBL_CIPSOV4_C_ADD, .flags = GENL_ADMIN_PERM, .policy = netlbl_cipsov4_genl_policy, .doit = netlbl_cipsov4_add, .dumpit = NULL, }; static struct genl_ops netlbl_cipsov4_genl_c_remove = { .cmd = NLBL_CIPSOV4_C_REMOVE, .flags = GENL_ADMIN_PERM, .policy = netlbl_cipsov4_genl_policy, .doit = netlbl_cipsov4_remove, .dumpit = NULL, }; static struct genl_ops netlbl_cipsov4_genl_c_list = { .cmd = NLBL_CIPSOV4_C_LIST, .flags = 0, .policy = netlbl_cipsov4_genl_policy, .doit = netlbl_cipsov4_list, .dumpit = NULL, }; static struct genl_ops netlbl_cipsov4_genl_c_listall = { .cmd = NLBL_CIPSOV4_C_LISTALL, .flags = 0, .policy = netlbl_cipsov4_genl_policy, .doit = NULL, .dumpit = netlbl_cipsov4_listall, }; /* * NetLabel Generic NETLINK Protocol Functions */ /** * netlbl_cipsov4_genl_init - Register the CIPSOv4 NetLabel component * * Description: * Register the CIPSOv4 packet NetLabel component with the Generic NETLINK * mechanism. Returns zero on success, negative values on failure. * */ int netlbl_cipsov4_genl_init(void) { int ret_val; ret_val = genl_register_family(&netlbl_cipsov4_gnl_family); if (ret_val != 0) return ret_val; ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family, &netlbl_cipsov4_genl_c_add); if (ret_val != 0) return ret_val; ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family, &netlbl_cipsov4_genl_c_remove); if (ret_val != 0) return ret_val; ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family, &netlbl_cipsov4_genl_c_list); if (ret_val != 0) return ret_val; ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family, &netlbl_cipsov4_genl_c_listall); if (ret_val != 0) return ret_val; return 0; }
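
/* Illustrative sketch (hypothetical alternative, not how this file does it):
 * the four genl_register_ops() calls in netlbl_cipsov4_genl_init() could be
 * table-driven instead, keeping the error handling in one place: */
static inline int example_register_all_ops(void)
{
	struct genl_ops *ops[] = {
		&netlbl_cipsov4_genl_c_add,
		&netlbl_cipsov4_genl_c_remove,
		&netlbl_cipsov4_genl_c_list,
		&netlbl_cipsov4_genl_c_listall,
	};
	int i;
	int ret_val;

	for (i = 0; i < ARRAY_SIZE(ops); i++) {
		ret_val = genl_register_ops(&netlbl_cipsov4_gnl_family, ops[i]);
		if (ret_val != 0)
			return ret_val;
	}
	return 0;
}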
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / common tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/tools.h> #include <gpac/network.h> #if defined(_WIN32_WCE) #include <winbase.h> #include <winsock.h> #include <tlhelp32.h> //#include <direct.h> #if !defined(__GNUC__) #pragma comment(lib, "toolhelp") #endif #elif defined(WIN32) #include <time.h> #include <sys/timeb.h> #include <io.h> #include <windows.h> #include <tlhelp32.h> #include <direct.h> #if !defined(__GNUC__) #pragma comment(lib, "winmm") #endif #else #include <time.h> #include <sys/stat.h> #include <sys/time.h> #include <dirent.h> #include <unistd.h> #include <sys/times.h> #include <sys/resource.h> #ifndef __BEOS__ #include <errno.h> #endif #define SLEEP_ABS_SELECT 1 static u32 sys_start_time = 0; static u64 sys_start_time_hr = 0; #endif #ifndef _WIN32_WCE #include <locale.h> #endif #ifndef WIN32 GF_EXPORT u32 gf_sys_clock() { struct timeval now; gettimeofday(&now, NULL); return (u32) ( ( (now.tv_sec)*1000 + (now.tv_usec) / 1000) - sys_start_time ); } GF_EXPORT u64 gf_sys_clock_high_res() { struct timeval now; gettimeofday(&now, NULL); return (now.tv_sec)*1000000 + (now.tv_usec) - sys_start_time_hr; } #endif GF_EXPORT void gf_sleep(u32 ms) { #ifdef WIN32 Sleep(ms); #else s32 sel_err; struct timeval tv; #ifndef SLEEP_ABS_SELECT u32 prev, now, elapsed; #endif #ifdef SLEEP_ABS_SELECT tv.tv_sec = ms/1000; tv.tv_usec = (ms%1000)*1000; #else prev = gf_sys_clock(); #endif do { errno = 0; #ifndef SLEEP_ABS_SELECT now = gf_sys_clock(); elapsed = (now - prev); if ( elapsed >= ms ) { break; } prev = now; ms -= elapsed; tv.tv_sec = ms/1000; tv.tv_usec = (ms%1000)*1000; #endif sel_err = select(0, NULL, NULL, NULL, &tv); } while ( sel_err && (errno == EINTR) ); #endif } #ifndef gettimeofday #ifdef _WIN32_WCE #include <time.h> //#include <wce_time.h> /* * Author of first version (timeval.h): by Wu Yongwei * Author of Windows CE version: Mateusz Loskot (mateusz@loskot.net) * * All code here is considered in the public domain though we do wish our names * could be retained if anyone uses them. */ /* * Constants used internally by time functions. 
*/ #ifndef _TM_DEFINED struct tm { int tm_sec; /* seconds after the minute - [0,59] */ int tm_min; /* minutes after the hour - [0,59] */ int tm_hour; /* hours since midnight - [0,23] */ int tm_mday; /* day of the month - [1,31] */ int tm_mon; /* months since January - [0,11] */ int tm_year; /* years since 1900 */ int tm_wday; /* days since Sunday - [0,6] */ int tm_yday; /* days since January 1 - [0,365] */ int tm_isdst; /* daylight savings time flag */ }; #define _TM_DEFINED #endif /* _TM_DEFINED */ #ifndef _TIMEZONE_DEFINED struct timezone { int tz_minuteswest; /* minutes W of Greenwich */ int tz_dsttime; /* type of dst correction */ }; #define _TIMEZONE_DEFINED #endif /* _TIMEZONE_DEFINED */ #if defined(_MSC_VER) || defined(__BORLANDC__) #define EPOCHFILETIME (116444736000000000i64) #else #define EPOCHFILETIME (116444736000000000LL) #endif int gettimeofday(struct timeval *tp, struct timezone *tzp) { SYSTEMTIME st; FILETIME ft; LARGE_INTEGER li; TIME_ZONE_INFORMATION tzi; __int64 t; static int tzflag; if (NULL != tp) { GetSystemTime(&st); SystemTimeToFileTime(&st, &ft); li.LowPart = ft.dwLowDateTime; li.HighPart = ft.dwHighDateTime; t = li.QuadPart; /* In 100-nanosecond intervals */ t -= EPOCHFILETIME; /* Offset to the Epoch time */ t /= 10; /* In microseconds */ tp->tv_sec = (long)(t / 1000000); tp->tv_usec = (long)(t % 1000000); } if (NULL != tzp) { GetTimeZoneInformation(&tzi); tzp->tz_minuteswest = tzi.Bias; if (tzi.StandardDate.wMonth != 0) { tzp->tz_minuteswest += tzi.StandardBias * 60; } if (tzi.DaylightDate.wMonth != 0) { tzp->tz_dsttime = 1; } else { tzp->tz_dsttime = 0; } } return 0; } #if _GPAC_UNUSED /* time between jan 1, 1601 and jan 1, 1970 in units of 100 nanoseconds FILETIME in Win32 is from jan 1, 1601 */ s32 __gettimeofday(struct timeval *tp, void *tz) { FILETIME ft; SYSTEMTIME st; s32 val; GetSystemTime(&st); SystemTimeToFileTime(&st, &ft); val = (s32) ((*(LONGLONG *) &ft - TIMESPEC_TO_FILETIME_OFFSET) / 10000000); tp->tv_sec = (u32) val; val = (s32 ) ((*(LONGLONG *) &ft - TIMESPEC_TO_FILETIME_OFFSET - ((LONGLONG) val * (LONGLONG) 10000000)) * 100); tp->tv_usec = val; return 0; } #endif #elif defined(WIN32) static s32 gettimeofday(struct timeval *tp, void *tz) { struct _timeb timebuffer; _ftime( &timebuffer ); tp->tv_sec = (long) (timebuffer.time); tp->tv_usec = timebuffer.millitm * 1000; return 0; } #endif #endif #ifdef _WIN32_WCE void CE_Assert(u32 valid, char *file, u32 line) { if (!valid) { char szBuf[2048]; u16 wcBuf[2048]; sprintf(szBuf, "File %s : line %d", file, line); CE_CharToWide(szBuf, wcBuf); MessageBox(NULL, wcBuf, _T("GPAC Assertion Failure"), MB_OK); exit(EXIT_FAILURE); } } void CE_WideToChar(unsigned short *w_str, char *str) { WideCharToMultiByte(CP_ACP, 0, w_str, -1, str, GF_MAX_PATH, NULL, NULL); } void CE_CharToWide(char *str, unsigned short *w_str) { MultiByteToWideChar(CP_ACP, 0, str, -1, w_str, GF_MAX_PATH); } #endif GF_EXPORT void gf_rand_init(Bool Reset) { if (Reset) { srand(1); } else { #if defined(_WIN32_WCE) srand( (u32) GetTickCount() ); #else srand( (u32) time(NULL) ); #endif } } GF_EXPORT u32 gf_rand() { return rand(); } #ifndef _WIN32_WCE #include <sys/stat.h> #endif GF_EXPORT void gf_utc_time_since_1970(u32 *sec, u32 *msec) { #if defined (WIN32) && !defined(_WIN32_WCE) struct _timeb tb; _ftime( &tb ); *sec = (u32) tb.time; *msec = tb.millitm; #else struct timeval tv; gettimeofday(&tv, NULL); *sec = (u32) tv.tv_sec; *msec = tv.tv_usec/1000; #endif } GF_EXPORT void gf_get_user_name(char *buf, u32 buf_size) { strcpy(buf, "mpeg4-user"); #if 
0 s32 len; char *t; strcpy(buf, ""); len = 1024; GetUserName(buf, &len); if (!len) { t = getenv("USER"); if (t) strcpy(buf, t); } #endif #if 0 struct passwd *pw; pw = getpwuid(getuid()); strcpy(buf, ""); if (pw && pw->pw_name) strcpy(name, pw->pw_name); #endif } #ifndef WIN32 GF_EXPORT char * my_str_upr(char *str) { u32 i; for (i=0; i<strlen(str); i++) { str[i] = toupper(str[i]); } return str; } GF_EXPORT char * my_str_lwr(char *str) { u32 i; for (i=0; i<strlen(str); i++) { str[i] = tolower(str[i]); } return str; } #endif /*seems OK under mingw also*/ #ifdef WIN32 #ifdef _WIN32_WCE Bool gf_prompt_has_input() { return 0; } char gf_prompt_get_char() { return 0; } GF_EXPORT void gf_prompt_set_echo_off(Bool echo_off) { return; } #else #include <conio.h> #include <windows.h> Bool gf_prompt_has_input() { return kbhit(); } char gf_prompt_get_char() { return getchar(); } GF_EXPORT void gf_prompt_set_echo_off(Bool echo_off) { DWORD flags; HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); BOOL ret = GetConsoleMode(hStdin, &flags); if (!ret) { DWORD err = GetLastError(); GF_LOG(GF_LOG_ERROR, GF_LOG_CONSOLE, ("[Console] GetConsoleMode() return with the following error code: %d\n", err)); } if (echo_off) flags &= ~ENABLE_ECHO_INPUT; else flags |= ENABLE_ECHO_INPUT; SetConsoleMode(hStdin, flags); } #endif #else /*linux kbhit/getchar- borrowed on debian mailing lists, (author Mike Brownlow)*/ #include <termios.h> static struct termios t_orig, t_new; static s32 ch_peek = -1; static void init_keyboard() { tcgetattr(0, &t_orig); t_new = t_orig; t_new.c_lflag &= ~ICANON; t_new.c_lflag &= ~ECHO; t_new.c_lflag &= ~ISIG; t_new.c_cc[VMIN] = 1; t_new.c_cc[VTIME] = 0; tcsetattr(0, TCSANOW, &t_new); } static void close_keyboard(Bool new_line) { tcsetattr(0,TCSANOW, &t_orig); if (new_line) fprintf(stderr, "\n"); } GF_EXPORT void gf_prompt_set_echo_off(Bool echo_off) { init_keyboard(); if (echo_off) t_orig.c_lflag &= ~ECHO; else t_orig.c_lflag |= ECHO; close_keyboard(0); } GF_EXPORT Bool gf_prompt_has_input() { u8 ch; s32 nread; pid_t fg = tcgetpgrp(STDIN_FILENO); //we are not foreground nor piped (used for IDEs), can't read stdin if ((fg!=-1) && (fg != getpgrp())) { return 0; } init_keyboard(); if (ch_peek != -1) return 1; t_new.c_cc[VMIN]=0; tcsetattr(0, TCSANOW, &t_new); nread = (s32) read(0, &ch, 1); t_new.c_cc[VMIN]=1; tcsetattr(0, TCSANOW, &t_new); if(nread == 1) { ch_peek = ch; return 1; } close_keyboard(0); return 0; } GF_EXPORT char gf_prompt_get_char() { char ch; if (ch_peek != -1) { ch = ch_peek; ch_peek = -1; close_keyboard(1); return ch; } if (0==read(0,&ch,1)) ch = 0; close_keyboard(1); return ch; } #endif static u32 sys_init = 0; static u32 last_update_time = 0; static u64 last_process_k_u_time = 0; GF_SystemRTInfo the_rti; #if defined(_WIN32_WCE) static LARGE_INTEGER frequency , init_counter; static u64 last_total_k_u_time = 0; static u32 mem_usage_at_startup = 0; #ifndef GetCurrentPermissions DWORD GetCurrentPermissions(); #endif #ifndef SetProcPermissions void SetProcPermissions(DWORD ); #endif #elif defined(WIN32) static LARGE_INTEGER frequency , init_counter; static u64 last_proc_idle_time = 0; static u64 last_proc_k_u_time = 0; static HINSTANCE psapi_hinst = NULL; typedef BOOL(WINAPI* NTGetSystemTimes)(VOID *,VOID *,VOID *); NTGetSystemTimes MyGetSystemTimes = NULL; typedef BOOL(WINAPI* NTGetProcessMemoryInfo)(HANDLE,VOID *,DWORD); NTGetProcessMemoryInfo MyGetProcessMemoryInfo = NULL; typedef int(WINAPI* NTQuerySystemInfo)(ULONG,PVOID,ULONG,PULONG); NTQuerySystemInfo MyQuerySystemInfo = NULL; 
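/*
 * Illustrative sketch (not part of the original file): a minimal polling
 * loop built on the gf_prompt_* console helpers defined above. The
 * function name, the 'q' quit key and the 30 ms sleep are arbitrary.
 */
#if 0
static void example_prompt_loop()
{
	Bool running = GF_TRUE;
	while (running) {
		if (gf_prompt_has_input()) {
			/* consume the pending key; quit on 'q' */
			if (gf_prompt_get_char() == 'q')
				running = GF_FALSE;
		}
		gf_sleep(30);
	}
}
#endif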
#ifndef PROCESS_MEMORY_COUNTERS typedef struct _PROCESS_MEMORY_COUNTERS { DWORD cb; DWORD PageFaultCount; SIZE_T PeakWorkingSetSize; SIZE_T WorkingSetSize; SIZE_T QuotaPeakPagedPoolUsage; SIZE_T QuotaPagedPoolUsage; SIZE_T QuotaPeakNonPagedPoolUsage; SIZE_T QuotaNonPagedPoolUsage; SIZE_T PagefileUsage; SIZE_T PeakPagefileUsage; } PROCESS_MEMORY_COUNTERS; #endif #ifndef SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { LARGE_INTEGER IdleTime; LARGE_INTEGER KernelTime; LARGE_INTEGER UserTime; LARGE_INTEGER Reserved1[2]; ULONG Reserved2; } SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION; #endif #else static u64 last_cpu_u_k_time = 0; static u64 last_cpu_idle_time = 0; static u64 mem_at_startup = 0; #endif #ifdef WIN32 static u32 (*OS_GetSysClock)(); u32 gf_sys_clock() { return OS_GetSysClock(); } static u64 (*OS_GetSysClockHR)(); u64 gf_sys_clock_high_res() { return OS_GetSysClockHR(); } #endif #ifdef WIN32 static u32 OS_GetSysClockHIGHRES() { LARGE_INTEGER now; QueryPerformanceCounter(&now); now.QuadPart -= init_counter.QuadPart; return (u32) ((now.QuadPart * 1000) / frequency.QuadPart); } static u64 OS_GetSysClockHIGHRES_FULL() { LARGE_INTEGER now; QueryPerformanceCounter(&now); now.QuadPart -= init_counter.QuadPart; return (u64) ((now.QuadPart * 1000000) / frequency.QuadPart); } static u32 OS_GetSysClockNORMAL() { #ifdef _WIN32_WCE return GetTickCount(); #else return timeGetTime(); #endif } static u64 OS_GetSysClockNORMAL_FULL() { u64 res = OS_GetSysClockNORMAL(); return res*1000; } #endif /* WIN32 */ #if defined(__sh__) /* Avoid exception for denormalized floating point values */ static int sh4_get_fpscr() { int ret; asm volatile ("sts fpscr,%0" : "=r" (ret)); return ret; } static void sh4_put_fpscr(int nv) { asm volatile ("lds %0,fpscr" : : "r" (nv)); } #define SH4_FPSCR_FR 0x00200000 #define SH4_FPSCR_SZ 0x00100000 #define SH4_FPSCR_PR 0x00080000 #define SH4_FPSCR_DN 0x00040000 #define SH4_FPSCR_RN 0x00000003 #define SH4_FPSCR_RN_N 0 #define SH4_FPSCR_RN_Z 1 extern int __fpscr_values[2]; void sh4_change_fpscr(int off, int on) { int b = sh4_get_fpscr(); off = ~off; off |= 0x00180000; on &= ~ 0x00180000; b &= off; b |= on; sh4_put_fpscr(b); __fpscr_values[0] &= off; __fpscr_values[0] |= on; __fpscr_values[1] &= off; __fpscr_values[1] |= on; } #endif #ifdef GPAC_MEMORY_TRACKING void gf_mem_enable_tracker(Bool enable_backtrace); #endif static u64 memory_at_gpac_startup = 0; static u32 gpac_argc = 0; const char **gpac_argv = NULL; GF_EXPORT void gf_sys_set_args(s32 argc, const char **argv) { //for OSX we allow overwrite of argc/argv due to different behavior between console-mode apps and GUI #if !defined(__DARWIN__) && !defined(__APPLE__) if (!gpac_argc && (argc>=0) ) #endif { gpac_argc = (u32) argc; gpac_argv = argv; } } GF_EXPORT u32 gf_sys_get_argc() { return gpac_argc; } GF_EXPORT const char *gf_sys_get_arg(u32 arg) { if (!gpac_argc || !gpac_argv) return NULL; if (arg>=gpac_argc) return NULL; return gpac_argv[arg]; } GF_EXPORT void gf_sys_init(GF_MemTrackerType mem_tracker_type) { if (!sys_init) { #if defined (WIN32) #if defined(_WIN32_WCE) MEMORYSTATUS ms; #else SYSTEM_INFO sysinfo; #endif #endif if (mem_tracker_type!=GF_MemTrackerNone) { #ifdef GPAC_MEMORY_TRACKING gf_mem_enable_tracker( (mem_tracker_type==GF_MemTrackerBackTrace) ? 
GF_TRUE : GF_FALSE);
#endif
		}

#ifndef GPAC_DISABLE_LOG
		/*by default log subsystem is initialized to error on all tools, and info on console to debug scripts*/
		gf_log_set_tool_level(GF_LOG_ALL, GF_LOG_ERROR);
		gf_log_set_tool_level(GF_LOG_CONSOLE, GF_LOG_INFO);
#endif

#if defined(__sh__)
		/* Round all denormalized floating point numbers to 0.0 */
		sh4_change_fpscr(0,SH4_FPSCR_DN) ;
#endif

#if defined(WIN32)
		frequency.QuadPart = 0;
		/*clock setup*/
		if (QueryPerformanceFrequency(&frequency)) {
			QueryPerformanceCounter(&init_counter);
			OS_GetSysClock = OS_GetSysClockHIGHRES;
			OS_GetSysClockHR = OS_GetSysClockHIGHRES_FULL;
			GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] using WIN32 performance timer\n"));
		} else {
			OS_GetSysClock = OS_GetSysClockNORMAL;
			OS_GetSysClockHR = OS_GetSysClockNORMAL_FULL;
			GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] using WIN32 regular timer\n"));
		}
#ifndef _WIN32_WCE
		timeBeginPeriod(1);
#endif

		GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] checking for run-time info tools"));
#if defined(_WIN32_WCE)
		last_total_k_u_time = last_process_k_u_time = 0;
		last_update_time = 0;
		memset(&the_rti, 0, sizeof(GF_SystemRTInfo));
		the_rti.pid = GetCurrentProcessId();
		the_rti.nb_cores = 1;
		GlobalMemoryStatus(&ms);
		mem_usage_at_startup = ms.dwAvailPhys;
#else
		/*cpu usage tools are buried in win32 dlls...*/
		MyGetSystemTimes = (NTGetSystemTimes) GetProcAddress(GetModuleHandle("kernel32.dll"), "GetSystemTimes");
		if (!MyGetSystemTimes) {
			MyQuerySystemInfo = (NTQuerySystemInfo) GetProcAddress(GetModuleHandle("ntdll.dll"), "NtQuerySystemInformation");
			if (MyQuerySystemInfo) {
				GF_LOG(GF_LOG_INFO, GF_LOG_CORE, (" - CPU: QuerySystemInformation"));
			}
		} else {
			GF_LOG(GF_LOG_INFO, GF_LOG_CORE, (" - CPU: GetSystemTimes"));
		}
		psapi_hinst = LoadLibrary("psapi.dll");
		MyGetProcessMemoryInfo = (NTGetProcessMemoryInfo) GetProcAddress(psapi_hinst, "GetProcessMemoryInfo");
		if (MyGetProcessMemoryInfo) {
			GF_LOG(GF_LOG_INFO, GF_LOG_CORE, (" - memory: GetProcessMemoryInfo"));
		}
		last_process_k_u_time = last_proc_idle_time = last_proc_k_u_time = 0;
		last_update_time = 0;
		memset(&the_rti, 0, sizeof(GF_SystemRTInfo));
		the_rti.pid = GetCurrentProcessId();
		GetSystemInfo( &sysinfo );
		the_rti.nb_cores = sysinfo.dwNumberOfProcessors;
#endif
		GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("\n"));
#else
		/*linux threads and OSX...*/
		last_process_k_u_time = 0;
		last_cpu_u_k_time = last_cpu_idle_time = 0;
		last_update_time = 0;
		memset(&the_rti, 0, sizeof(GF_SystemRTInfo));
		the_rti.pid = getpid();
		the_rti.nb_cores = (u32) sysconf( _SC_NPROCESSORS_ONLN );
		sys_start_time = gf_sys_clock();
		sys_start_time_hr = gf_sys_clock_high_res();
#endif
		GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] process id %d\n", the_rti.pid));

#ifndef _WIN32_WCE
		setlocale( LC_NUMERIC, "C" );
#endif
	}
	sys_init += 1;

	/*init RTI stats*/
	if (!memory_at_gpac_startup) {
		GF_SystemRTInfo rti;
		if (gf_sys_get_rti(500, &rti, GF_RTI_SYSTEM_MEMORY_ONLY)) {
			memory_at_gpac_startup = rti.physical_memory_avail;
			GF_LOG(GF_LOG_INFO, GF_LOG_CORE, ("[core] System init OK - process id %d - %d MB physical RAM - %d cores\n", rti.pid, (u32) (rti.physical_memory/1024/1024), rti.nb_cores));
		} else {
			memory_at_gpac_startup = 0;
		}
	}
}

GF_EXPORT
void gf_sys_close()
{
	if (sys_init > 0) {
		sys_init --;
		if (sys_init) return;
		/*prevent any call*/
		last_update_time = 0xFFFFFFFF;

#if defined(WIN32) && !defined(_WIN32_WCE)
		timeEndPeriod(1);

		MyGetSystemTimes = NULL;
		MyGetProcessMemoryInfo = NULL;
		MyQuerySystemInfo = NULL;
		if (psapi_hinst) FreeLibrary(psapi_hinst);
		psapi_hinst = NULL;
#endif
	}
}

#ifdef GPAC_MEMORY_TRACKING
extern
size_t gpac_allocated_memory; extern size_t gpac_nb_alloc_blocs; #endif /*CPU and Memory Usage*/ #ifdef WIN32 Bool gf_sys_get_rti_os(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags) { #if defined(_WIN32_WCE) THREADENTRY32 tentry; u64 total_cpu_time, process_cpu_time; DWORD orig_perm; #endif MEMORYSTATUS ms; u64 creation, exit, kernel, user, process_k_u_time, proc_idle_time, proc_k_u_time; u32 entry_time; HANDLE hSnapShot; assert(sys_init); if (!rti) return GF_FALSE; proc_idle_time = proc_k_u_time = process_k_u_time = 0; entry_time = gf_sys_clock(); if (last_update_time && (entry_time - last_update_time < refresh_time_ms)) { memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return GF_FALSE; } if (flags & GF_RTI_SYSTEM_MEMORY_ONLY) { memset(rti, 0, sizeof(GF_SystemRTInfo)); rti->sampling_instant = last_update_time; GlobalMemoryStatus(&ms); rti->physical_memory = ms.dwTotalPhys; rti->physical_memory_avail = ms.dwAvailPhys; #ifdef GPAC_MEMORY_TRACKING rti->gpac_memory = (u64) gpac_allocated_memory; #endif return GF_TRUE; } #if defined (_WIN32_WCE) total_cpu_time = process_cpu_time = 0; /*get a snapshot of all running threads*/ orig_perm = GetCurrentPermissions(); SetProcPermissions(0xFFFFFFFF); hSnapShot = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0); if (hSnapShot) { tentry.dwSize = sizeof(THREADENTRY32); the_rti.thread_count = 0; /*note we always act as if GF_RTI_ALL_PROCESSES_TIMES flag is set, since there is no other way to enumerate threads from a process, and GetProcessTimes doesn't exist on CE*/ if (Thread32First(hSnapShot, &tentry)) { do { /*get thread times*/ if (GetThreadTimes( (HANDLE) tentry.th32ThreadID, (FILETIME *) &creation, (FILETIME *) &exit, (FILETIME *) &kernel, (FILETIME *) &user)) { total_cpu_time += user + kernel; if (tentry.th32OwnerProcessID==the_rti.pid) { process_cpu_time += user + kernel; the_rti.thread_count ++; } } } while (Thread32Next(hSnapShot, &tentry)); } CloseToolhelp32Snapshot(hSnapShot); } if (flags & GF_RTI_PROCESS_MEMORY) { HEAPLIST32 hlentry; HEAPENTRY32 hentry; the_rti.process_memory = 0; hlentry.dwSize = sizeof(HEAPLIST32); hSnapShot = CreateToolhelp32Snapshot(TH32CS_SNAPHEAPLIST, the_rti.pid); if (hSnapShot && Heap32ListFirst(hSnapShot, &hlentry)) { do { hentry.dwSize = sizeof(hentry); if (Heap32First(hSnapShot, &hentry, hlentry.th32ProcessID, hlentry.th32HeapID)) { do { the_rti.process_memory += hentry.dwBlockSize; } while (Heap32Next(hSnapShot, &hentry)); } } while (Heap32ListNext(hSnapShot, &hlentry)); } CloseToolhelp32Snapshot(hSnapShot); } SetProcPermissions(orig_perm); total_cpu_time /= 10; process_cpu_time /= 10; #else /*XP-SP1 and Win2003 servers only have GetSystemTimes support. 
This will give a better estimation of CPU usage since we can take into account the idle time*/ if (MyGetSystemTimes) { u64 u_time; MyGetSystemTimes(&proc_idle_time, &proc_k_u_time, &u_time); proc_k_u_time += u_time; proc_idle_time /= 10; proc_k_u_time /= 10; } /*same rq for NtQuerySystemInformation*/ else if (MyQuerySystemInfo) { DWORD ret; SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION info; MyQuerySystemInfo(0x8 /*SystemProcessorPerformanceInformation*/, &info, sizeof(info), &ret); if (ret && (ret<=sizeof(info))) { proc_idle_time = info.IdleTime.QuadPart / 10; proc_k_u_time = (info.KernelTime.QuadPart + info.UserTime.QuadPart) / 10; } } /*no special API available, ONLY FETCH TIMES if requested (may eat up some time)*/ else if (flags & GF_RTI_ALL_PROCESSES_TIMES) { PROCESSENTRY32 pentry; /*get a snapshot of all running threads*/ hSnapShot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); if (!hSnapShot) return GF_FALSE; pentry.dwSize = sizeof(PROCESSENTRY32); if (Process32First(hSnapShot, &pentry)) { do { HANDLE procH = NULL; if (pentry.th32ProcessID) procH = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pentry.th32ProcessID); if (procH && GetProcessTimes(procH, (FILETIME *) &creation, (FILETIME *) &exit, (FILETIME *) &kernel, (FILETIME *) &user) ) { user += kernel; proc_k_u_time += user; if (pentry.th32ProcessID==the_rti.pid) { process_k_u_time = user; //nb_threads = pentry.cntThreads; } } if (procH) CloseHandle(procH); } while (Process32Next(hSnapShot, &pentry)); } CloseHandle(hSnapShot); proc_k_u_time /= 10; } if (!process_k_u_time) { HANDLE procH = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, the_rti.pid); if (procH && GetProcessTimes(procH, (FILETIME *) &creation, (FILETIME *) &exit, (FILETIME *) &kernel, (FILETIME *) &user) ) { process_k_u_time = user + kernel; } if (procH) CloseHandle(procH); if (!process_k_u_time) return GF_FALSE; } process_k_u_time /= 10; /*this won't cost a lot*/ if (MyGetProcessMemoryInfo) { PROCESS_MEMORY_COUNTERS pmc; HANDLE procH = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, the_rti.pid); MyGetProcessMemoryInfo(procH, &pmc, sizeof (pmc)); the_rti.process_memory = pmc.WorkingSetSize; if (procH) CloseHandle(procH); } /*THIS IS VERY HEAVY (eats up mem and time) - only perform if requested*/ else if (flags & GF_RTI_PROCESS_MEMORY) { HEAPLIST32 hlentry; HEAPENTRY32 hentry; the_rti.process_memory = 0; hlentry.dwSize = sizeof(HEAPLIST32); hSnapShot = CreateToolhelp32Snapshot(TH32CS_SNAPHEAPLIST, the_rti.pid); if (hSnapShot && Heap32ListFirst(hSnapShot, &hlentry)) { do { hentry.dwSize = sizeof(hentry); if (Heap32First(&hentry, hlentry.th32ProcessID, hlentry.th32HeapID)) { do { the_rti.process_memory += hentry.dwBlockSize; } while (Heap32Next(&hentry)); } } while (Heap32ListNext(hSnapShot, &hlentry)); } CloseHandle(hSnapShot); } #endif the_rti.sampling_instant = last_update_time; if (last_update_time) { the_rti.sampling_period_duration = entry_time - last_update_time; the_rti.process_cpu_time_diff = (u32) ((process_k_u_time - last_process_k_u_time)/1000); #if defined(_WIN32_WCE) the_rti.total_cpu_time_diff = (u32) ((total_cpu_time - last_total_k_u_time)/1000); /*we're not that accurate....*/ if (the_rti.total_cpu_time_diff > the_rti.sampling_period_duration) the_rti.sampling_period_duration = the_rti.total_cpu_time_diff; /*rough values*/ the_rti.cpu_idle_time = the_rti.sampling_period_duration - the_rti.total_cpu_time_diff; if (!the_rti.sampling_period_duration) the_rti.sampling_period_duration=1; the_rti.total_cpu_usage = (u32) (100 * the_rti.total_cpu_time_diff / 
the_rti.sampling_period_duration); if (the_rti.total_cpu_time_diff + the_rti.cpu_idle_time==0) the_rti.total_cpu_time_diff ++; the_rti.process_cpu_usage = (u32) (100*the_rti.process_cpu_time_diff / (the_rti.total_cpu_time_diff + the_rti.cpu_idle_time) ); #else /*oops, we have no choice but to assume 100% cpu usage during this period*/ if (!proc_k_u_time) { the_rti.total_cpu_time_diff = the_rti.sampling_period_duration; proc_k_u_time = last_proc_k_u_time + the_rti.sampling_period_duration; the_rti.cpu_idle_time = 0; the_rti.total_cpu_usage = 100; if (the_rti.sampling_period_duration) the_rti.process_cpu_usage = (u32) (100*the_rti.process_cpu_time_diff / the_rti.sampling_period_duration); } else { u64 samp_sys_time, idle; the_rti.total_cpu_time_diff = (u32) ((proc_k_u_time - last_proc_k_u_time)/1000); /*we're not that accurate....*/ if (the_rti.total_cpu_time_diff > the_rti.sampling_period_duration) { the_rti.sampling_period_duration = the_rti.total_cpu_time_diff; } if (!proc_idle_time) proc_idle_time = last_proc_idle_time + (the_rti.sampling_period_duration - the_rti.total_cpu_time_diff); samp_sys_time = proc_k_u_time - last_proc_k_u_time; idle = proc_idle_time - last_proc_idle_time; the_rti.cpu_idle_time = (u32) (idle/1000); if (samp_sys_time) { the_rti.total_cpu_usage = (u32) ( (samp_sys_time - idle) / (samp_sys_time / 100) ); the_rti.process_cpu_usage = (u32) (100*the_rti.process_cpu_time_diff / (samp_sys_time/1000)); } } #endif } last_update_time = entry_time; last_process_k_u_time = process_k_u_time; GlobalMemoryStatus(&ms); the_rti.physical_memory = ms.dwTotalPhys; #ifdef GPAC_MEMORY_TRACKING the_rti.gpac_memory = (u64) gpac_allocated_memory; #endif the_rti.physical_memory_avail = ms.dwAvailPhys; #if defined(_WIN32_WCE) last_total_k_u_time = total_cpu_time; if (!the_rti.process_memory) the_rti.process_memory = mem_usage_at_startup - ms.dwAvailPhys; #else last_proc_idle_time = proc_idle_time; last_proc_k_u_time = proc_k_u_time; #endif if (!the_rti.gpac_memory) the_rti.gpac_memory = the_rti.process_memory; memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return GF_TRUE; } #elif defined(GPAC_CONFIG_DARWIN) && !defined(GPAC_IPHONE) #include <sys/types.h> #include <sys/sysctl.h> #include <sys/vmmeter.h> #include <mach/mach_init.h> #include <mach/mach_host.h> #include <mach/mach_port.h> #include <mach/mach_traps.h> #include <mach/task_info.h> #include <mach/thread_info.h> #include <mach/thread_act.h> #include <mach/vm_region.h> #include <mach/vm_map.h> #include <mach/task.h> #if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1060 #include <mach/shared_region.h> #else #include <mach/shared_memory_server.h> #endif #include <mach/mach_error.h> static u64 total_physical_memory = 0; Bool gf_sys_get_rti_os(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags) { size_t length; u32 entry_time, i, percent; int mib[6]; u64 result; int pagesize; u64 process_u_k_time; double utime, stime; vm_statistics_data_t vmstat; task_t task; kern_return_t error; thread_array_t thread_table; unsigned table_size; thread_basic_info_t thi; thread_basic_info_data_t thi_data; struct task_basic_info ti; mach_msg_type_number_t count = HOST_VM_INFO_COUNT, size = sizeof(ti); entry_time = gf_sys_clock(); if (last_update_time && (entry_time - last_update_time < refresh_time_ms)) { memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return 0; } mib[0] = CTL_HW; mib[1] = HW_PAGESIZE; length = sizeof(pagesize); if (sysctl(mib, 2, &pagesize, &length, NULL, 0) < 0) { return 0; } if (host_statistics(mach_host_self(), HOST_VM_INFO, 
(host_info_t)&vmstat, &count) != KERN_SUCCESS) { return 0; } the_rti.physical_memory = (vmstat.wire_count + vmstat.active_count + vmstat.inactive_count + vmstat.free_count)* pagesize; the_rti.physical_memory_avail = vmstat.free_count * pagesize; if (!total_physical_memory) { mib[0] = CTL_HW; mib[1] = HW_MEMSIZE; length = sizeof(u64); if (sysctl(mib, 2, &result, &length, NULL, 0) >= 0) { total_physical_memory = result; } } the_rti.physical_memory = total_physical_memory; error = task_for_pid(mach_task_self(), the_rti.pid, &task); if (error) { GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] Cannot get process task for PID %d: error %d\n", the_rti.pid, error)); return 0; } error = task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&ti, &size); if (error) { GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] Cannot get process task info (PID %d): error %d\n", the_rti.pid, error)); return 0; } percent = 0; utime = ti.user_time.seconds + ti.user_time.microseconds * 1e-6; stime = ti.system_time.seconds + ti.system_time.microseconds * 1e-6; error = task_threads(task, &thread_table, &table_size); if (error != KERN_SUCCESS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] Cannot get threads task for PID %d: error %d\n", the_rti.pid, error)); return 0; } thi = &thi_data; for (i = 0; i != table_size; ++i) { count = THREAD_BASIC_INFO_COUNT; error = thread_info(thread_table[i], THREAD_BASIC_INFO, (thread_info_t)thi, &count); if (error != KERN_SUCCESS) { mach_error("[RTI] Unexpected thread_info() call return", error); GF_LOG(GF_LOG_WARNING, GF_LOG_CORE, ("[RTI] Unexpected thread info for PID %d\n", the_rti.pid)); break; } if ((thi->flags & TH_FLAGS_IDLE) == 0) { utime += thi->user_time.seconds + thi->user_time.microseconds * 1e-6; stime += thi->system_time.seconds + thi->system_time.microseconds * 1e-6; percent += (u32) (100 * (double)thi->cpu_usage / TH_USAGE_SCALE); } } vm_deallocate(mach_task_self(), (vm_offset_t)thread_table, table_size * sizeof(thread_array_t)); mach_port_deallocate(mach_task_self(), task); process_u_k_time = utime + stime; the_rti.sampling_instant = last_update_time; if (last_update_time) { the_rti.sampling_period_duration = (entry_time - last_update_time); the_rti.process_cpu_time_diff = (process_u_k_time - last_process_k_u_time) * 10; the_rti.total_cpu_time_diff = the_rti.sampling_period_duration; /*TODO*/ the_rti.cpu_idle_time = 0; the_rti.total_cpu_usage = 0; if (!the_rti.process_cpu_time_diff) the_rti.process_cpu_time_diff = the_rti.total_cpu_time_diff; the_rti.process_cpu_usage = percent; } else { mem_at_startup = the_rti.physical_memory_avail; } the_rti.process_memory = mem_at_startup - the_rti.physical_memory_avail; #ifdef GPAC_MEMORY_TRACKING the_rti.gpac_memory = gpac_allocated_memory; #endif last_process_k_u_time = process_u_k_time; last_cpu_idle_time = 0; last_update_time = entry_time; memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return 1; } //linux #else Bool gf_sys_get_rti_os(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags) { u32 entry_time; u64 process_u_k_time; u32 u_k_time, idle_time; #if 0 char szProc[100]; #endif char line[2048]; FILE *f; assert(sys_init); entry_time = gf_sys_clock(); if (last_update_time && (entry_time - last_update_time < refresh_time_ms)) { memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return 0; } u_k_time = idle_time = 0; f = gf_fopen("/proc/stat", "r"); if (f) { u32 k_time, nice_time, u_time; if (fgets(line, 128, f) != NULL) { if (sscanf(line, "cpu %u %u %u %u\n", &u_time, &k_time, &nice_time, &idle_time) == 4) { u_k_time = u_time + k_time + 
nice_time;
			}
		}
		gf_fclose(f);
	}

	process_u_k_time = 0;
	the_rti.process_memory = 0;

	/*FIXME? under LinuxThreads this will only fetch stats for the calling thread, we would
	have to enumerate /proc to get the complete CPU usage of all threads of the process...*/
#if 0
	sprintf(szProc, "/proc/%d/stat", the_rti.pid);
	f = gf_fopen(szProc, "r");
	if (f) {
		fflush(f);
		if (fgets(line, 2048, f) != NULL) {
			char state;
			char *start;
			long cutime, cstime, priority, nice, itrealvalue, rss;
			int exit_signal, processor;
			unsigned long flags, minflt, cminflt, majflt, cmajflt, utime, stime, starttime, vsize, rlim, startcode, endcode, startstack, kstkesp, kstkeip, signal, blocked, sigignore, sigcatch, wchan, nswap, cnswap, rem;
			int ppid, pgrp, session, tty_nr, tty_pgrp, res;

			start = strchr(line, ')');
			if (start) start += 2;
			else {
				start = strchr(line, ' ');
				start++;
			}
			res = sscanf(start,"%c %d %d %d %d %d %lu %lu %lu %lu \
				%lu %lu %lu %ld %ld %ld %ld %ld %ld %lu \
				%lu %ld %lu %lu %lu %lu %lu %lu %lu %lu \
				%lu %lu %lu %lu %lu %d %d",
			             &state, &ppid, &pgrp, &session, &tty_nr, &tty_pgrp, &flags, &minflt, &cminflt, &majflt,
			             &cmajflt, &utime, &stime, &cutime, &cstime, &priority, &nice, &itrealvalue, &rem, &starttime,
			             &vsize, &rss, &rlim, &startcode, &endcode, &startstack, &kstkesp, &kstkeip, &signal, &blocked,
			             &sigignore, &sigcatch, &wchan, &nswap, &cnswap, &exit_signal, &processor);

			if (res) process_u_k_time = (u64) (cutime + cstime);
			else {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] PROC %s parse error\n", szProc));
			}
		} else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] error reading %s\n", szProc));
		}
		gf_fclose(f);
	} else {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] cannot open %s\n", szProc));
	}
	sprintf(szProc, "/proc/%d/status", the_rti.pid);
	f = gf_fopen(szProc, "r");
	if (f) {
		while (fgets(line, 1024, f) != NULL) {
			if (!strnicmp(line, "VmSize:", 7)) {
				sscanf(line, "VmSize: %"LLD" kB", &the_rti.process_memory);
				the_rti.process_memory *= 1024;
			}
		}
		gf_fclose(f);
	} else {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] cannot open %s\n", szProc));
	}
#endif

#ifndef GPAC_IPHONE
	the_rti.physical_memory = the_rti.physical_memory_avail = 0;
	f = gf_fopen("/proc/meminfo", "r");
	if (f) {
		while (fgets(line, 1024, f) != NULL) {
			if (!strnicmp(line, "MemTotal:", 9)) {
				sscanf(line, "MemTotal: "LLU" kB", &the_rti.physical_memory);
				the_rti.physical_memory *= 1024;
			} else if (!strnicmp(line, "MemFree:", 8)) {
				sscanf(line, "MemFree: "LLU" kB", &the_rti.physical_memory_avail);
				the_rti.physical_memory_avail *= 1024;
				break;
			}
		}
		gf_fclose(f);
	} else {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[RTI] cannot open /proc/meminfo\n"));
	}
#endif

	the_rti.sampling_instant = last_update_time;

	if (last_update_time) {
		the_rti.sampling_period_duration = (entry_time - last_update_time);
		the_rti.process_cpu_time_diff = (u32) (process_u_k_time - last_process_k_u_time) * 10;

		/*oops, we have no choice but to assume 100% cpu usage during this period*/
		if (!u_k_time) {
			the_rti.total_cpu_time_diff = the_rti.sampling_period_duration;
			u_k_time = (u32) (last_cpu_u_k_time + the_rti.sampling_period_duration);
			the_rti.cpu_idle_time = 0;
			the_rti.total_cpu_usage = 100;
			if (!the_rti.process_cpu_time_diff) the_rti.process_cpu_time_diff = the_rti.total_cpu_time_diff;
			the_rti.process_cpu_usage = (u32) ( 100 * the_rti.process_cpu_time_diff / the_rti.sampling_period_duration);
		} else {
			u64 samp_sys_time;
			/*move to ms (/proc/stat gives times in 100 ms unit*/
			the_rti.total_cpu_time_diff = (u32) (u_k_time - last_cpu_u_k_time)*10;

			/*we're not that accurate....*/
			if
(the_rti.total_cpu_time_diff > the_rti.sampling_period_duration) the_rti.sampling_period_duration = the_rti.total_cpu_time_diff; if (!idle_time) idle_time = (the_rti.sampling_period_duration - the_rti.total_cpu_time_diff)/10; samp_sys_time = u_k_time - last_cpu_u_k_time; the_rti.cpu_idle_time = (u32) (idle_time - last_cpu_idle_time); the_rti.total_cpu_usage = (u32) ( 100 * samp_sys_time / (the_rti.cpu_idle_time + samp_sys_time ) ); /*move to ms (/proc/stat gives times in 100 ms unit*/ the_rti.cpu_idle_time *= 10; if (!the_rti.process_cpu_time_diff) the_rti.process_cpu_time_diff = the_rti.total_cpu_time_diff; the_rti.process_cpu_usage = (u32) ( 100 * the_rti.process_cpu_time_diff / (the_rti.cpu_idle_time + 10*samp_sys_time ) ); } } else { mem_at_startup = the_rti.physical_memory_avail; } the_rti.process_memory = mem_at_startup - the_rti.physical_memory_avail; #ifdef GPAC_MEMORY_TRACKING the_rti.gpac_memory = gpac_allocated_memory; #endif last_process_k_u_time = process_u_k_time; last_cpu_idle_time = idle_time; last_cpu_u_k_time = u_k_time; last_update_time = entry_time; memcpy(rti, &the_rti, sizeof(GF_SystemRTInfo)); return 1; } #endif GF_EXPORT Bool gf_sys_get_rti(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags) { Bool res = gf_sys_get_rti_os(refresh_time_ms, rti, flags); if (res) { if (!rti->process_memory) rti->process_memory = memory_at_gpac_startup - rti->physical_memory_avail; if (!rti->gpac_memory) rti->gpac_memory = memory_at_gpac_startup - rti->physical_memory_avail; } return res; } GF_EXPORT char * gf_get_default_cache_directory() { char szPath[GF_MAX_PATH]; char* root_tmp; size_t len; #ifdef _WIN32_WCE strcpy(szPath, "\\windows\\temp" ); #elif defined(WIN32) GetTempPath(GF_MAX_PATH, szPath); #else strcpy(szPath, "/tmp"); #endif root_tmp = gf_strdup(szPath); len = strlen(szPath); if (szPath[len-1] != GF_PATH_SEPARATOR) { szPath[len] = GF_PATH_SEPARATOR; szPath[len+1] = 0; } strcat(szPath, "gpac_cache"); if ( !gf_dir_exists(szPath) && gf_mkdir(szPath)!=GF_OK ) { return root_tmp; } gf_free(root_tmp); return gf_strdup(szPath); } GF_EXPORT Bool gf_sys_get_battery_state(Bool *onBattery, u32 *onCharge, u32*level, u32 *batteryLifeTime, u32 *batteryFullLifeTime) { #if defined(_WIN32_WCE) SYSTEM_POWER_STATUS_EX sps; GetSystemPowerStatusEx(&sps, 0); if (onBattery) *onBattery = sps.ACLineStatus ? 0 : 1; if (onCharge) *onCharge = (sps.BatteryFlag & BATTERY_FLAG_CHARGING) ? 1 : 0; if (level) *level = sps.BatteryLifePercent; if (batteryLifeTime) *batteryLifeTime = sps.BatteryLifeTime; if (batteryFullLifeTime) *batteryFullLifeTime = sps.BatteryFullLifeTime; #elif defined(WIN32) SYSTEM_POWER_STATUS sps; GetSystemPowerStatus(&sps); if (onBattery) *onBattery = sps.ACLineStatus ? GF_FALSE : GF_TRUE; if (onCharge) *onCharge = (sps.BatteryFlag & BATTERY_FLAG_CHARGING) ? 
1 : 0; if (level) *level = sps.BatteryLifePercent; if (batteryLifeTime) *batteryLifeTime = sps.BatteryLifeTime; if (batteryFullLifeTime) *batteryFullLifeTime = sps.BatteryFullLifeTime; #endif return GF_TRUE; } struct GF_GlobalLock { const char * resourceName; }; #ifndef WIN32 #define CPF_CLOEXEC 1 #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> struct _GF_GlobalLock_opaque { char * resourceName; char * pidFile; int fd; }; GF_GlobalLock * gf_create_PID_file( const char * resourceName ) { const char * prefix = "/gpac_lock_"; const char * dir = gf_get_default_cache_directory(); char * pidfile; int flags; int status; pidfile = gf_malloc(strlen(dir)+strlen(prefix)+strlen(resourceName)+1); strcpy(pidfile, dir); strcat(pidfile, prefix); /* Use only valid names for file */ { const char *res; char * pid = &(pidfile[strlen(pidfile)]); for (res = resourceName; *res ; res++) { if (*res >= 'A' && *res <= 'z') *pid = * res; else *pid = '_'; pid++; } *pid = '\0'; } int fd = open(pidfile, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); if (fd == -1) goto exit; /* Get the flags */ flags = fcntl(fd, F_GETFD); if (flags == -1) { goto exit; } /* Set FD_CLOEXEC, so exclusive lock will be removed on exit, so even if GPAC crashes, * lock will be allowed for next instance */ flags |= FD_CLOEXEC; /* Now, update the flags */ if (fcntl(fd, F_SETFD, flags) == -1) { goto exit; } /* Now, we try to lock the file */ { struct flock fl; fl.l_type = F_WRLCK; fl.l_whence = SEEK_SET; fl.l_start = fl.l_len = 0; status = fcntl(fd, F_SETLK, &fl); } if (status == -1) { goto exit; } if (ftruncate(fd, 0) == -1) { goto exit; } /* Write the PID */ { int sz = 100; char * buf = gf_malloc( sz ); sz = snprintf(buf, sz, "%ld\n", (long) getpid()); if (write(fd, buf, sz) != sz) { gf_free(buf); goto exit; } } sync(); { GF_GlobalLock * lock = gf_malloc( sizeof(GF_GlobalLock)); lock->resourceName = gf_strdup(resourceName); lock->pidFile = pidfile; lock->fd = fd; return lock; } exit: if (fd >= 0) close(fd); return NULL; } #else /* WIN32 */ struct _GF_GlobalLock_opaque { char * resourceName; HANDLE hMutex; /*a named mutex is a system-mode object on windows*/ }; #endif GF_EXPORT GF_GlobalLock * gf_global_resource_lock(const char * resourceName) { #ifdef WIN32 #ifdef _WIN32_WCE unsigned short sWResourceName[MAX_PATH]; #endif DWORD lastErr; GF_GlobalLock *lock = gf_malloc(sizeof(GF_GlobalLock)); lock->resourceName = gf_strdup(resourceName); /*first ensure mutex is created*/ #ifdef _WIN32_WCE CE_CharToWide((char *)resourceName, sWResourceName); lock->hMutex = CreateMutex(NULL, TRUE, sWResourceName); #else lock->hMutex = CreateMutex(NULL, TRUE, resourceName); #endif lastErr = GetLastError(); if (lastErr && lastErr == ERROR_ALREADY_EXISTS) return NULL; if (!lock->hMutex) { GF_LOG(GF_LOG_ERROR, GF_LOG_MUTEX, ("[Mutex] Couldn't create mutex for global lock: %d\n", lastErr)); return NULL; } /*then lock it*/ switch (WaitForSingleObject(lock->hMutex, INFINITE)) { case WAIT_ABANDONED: case WAIT_TIMEOUT: assert(0); /*serious error: someone has modified the object elsewhere*/ GF_LOG(GF_LOG_ERROR, GF_LOG_MUTEX, ("[Mutex] Couldn't get the global lock\n")); gf_global_resource_unlock(lock); return NULL; } return lock; #else /* WIN32 */ return gf_create_PID_file(resourceName); #endif /* WIN32 */ } /*! 
 * Unlock a previously locked resource
 * \param lock The resource to unlock
 * \return GF_OK if everything went fine
 */
GF_EXPORT
GF_Err gf_global_resource_unlock(GF_GlobalLock * lock)
{
	if (!lock) return GF_BAD_PARAM;
#ifndef WIN32
	assert( lock->pidFile);
	close(lock->fd);
	if (unlink(lock->pidFile))
		perror("Failed to unlink lock file");
	gf_free(lock->pidFile);
	lock->pidFile = NULL;
	lock->fd = -1;
#else /* WIN32 */
	{
		/*MSDN: "The mutex object is destroyed when its last handle has been closed."*/
		BOOL ret = ReleaseMutex(lock->hMutex);
		if (!ret) {
			DWORD err = GetLastError();
			GF_LOG(GF_LOG_ERROR, GF_LOG_MUTEX, ("[Mutex] Couldn't release mutex for global lock: %d\n", err));
		}
		ret = CloseHandle(lock->hMutex);
		if (!ret) {
			DWORD err = GetLastError();
			GF_LOG(GF_LOG_ERROR, GF_LOG_MUTEX, ("[Mutex] Couldn't destroy mutex for global lock: %d\n", err));
		}
	}
#endif
	if (lock->resourceName) gf_free(lock->resourceName);
	lock->resourceName = NULL;
	gf_free(lock);
	return GF_OK;
}

#ifdef GPAC_ANDROID
fm_callback_func fm_cbk = NULL;
static void *fm_cbk_obj = NULL;

void gf_fm_request_set_callback(void *cbk_obj, fm_callback_func cbk_func) {
	fm_cbk = cbk_func;
	fm_cbk_obj = cbk_obj;
}

void gf_fm_request_call(u32 type, u32 param, int *value) {
	if (fm_cbk)
		fm_cbk(fm_cbk_obj, type, param, value);
}
#endif //GPAC_ANDROID

GF_EXPORT
s32 gf_gettimeofday(struct timeval *tp, void *tz)
{
	return gettimeofday(tp, tz);
}

static u32 ntp_shift = GF_NTP_SEC_1900_TO_1970;

GF_EXPORT
void gf_net_set_ntp_shift(s32 shift)
{
	ntp_shift = GF_NTP_SEC_1900_TO_1970 + shift;
}

/* NTP tools */
GF_EXPORT
void gf_net_get_ntp(u32 *sec, u32 *frac)
{
	u64 frac_part;
	struct timeval now;
	gettimeofday(&now, NULL);
	if (sec) {
		*sec = (u32) (now.tv_sec) + ntp_shift;
	}
	if (frac) {
		frac_part = now.tv_usec * 0xFFFFFFFFULL;
		frac_part /= 1000000;
		*frac = (u32) ( frac_part );
	}
}

GF_EXPORT
u64 gf_net_get_ntp_ts()
{
	u64 res;
	u32 sec, frac;
	gf_net_get_ntp(&sec, &frac);
	res = sec;
	res<<= 32;
	res |= frac;
	return res;
}

GF_EXPORT
s32 gf_net_get_ntp_diff_ms(u64 ntp)
{
	u32 remote_s, remote_f, local_s, local_f;
	s64 local, remote;

	remote_s = (ntp >> 32);
	remote_f = (u32) (ntp & 0xFFFFFFFFULL);
	gf_net_get_ntp(&local_s, &local_f);

	local = local_s;
	local *= 1000;
	local += ((u64) local_f)*1000 / 0xFFFFFFFFULL;

	remote = remote_s;
	remote *= 1000;
	remote += ((u64) remote_f)*1000 / 0xFFFFFFFFULL;

	return (s32) (local - remote);
}

GF_EXPORT
s32 gf_net_get_timezone()
{
#if defined(_WIN32_WCE)
	return 0;
#else
	//this has been commented due to some reports of broken implementation on some systems ...
	// s32 val = timezone;
	// return val;

	/*FIXME - avoid errors at midnight when estimating timezone this does not work !!*/
	s32 t_timezone;
	struct tm t_gmt, t_local;
	time_t t_time;
	t_time = time(NULL);
	t_gmt = *gmtime(&t_time);
	t_local = *localtime(&t_time);

	t_timezone = (t_gmt.tm_hour - t_local.tm_hour) * 3600 + (t_gmt.tm_min - t_local.tm_min) * 60;
	return t_timezone;
#endif
}

//no mkgmtime on mingw..., use our own
#if (defined(WIN32) && defined(__GNUC__))
static Bool leap_year(u32 year) {
	year += 1900;
	return (year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0) ? GF_TRUE : GF_FALSE;
}
static time_t gf_mktime_utc(struct tm *tm)
{
	static const u32 days_per_month[2][12] = {
		{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
		{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
	};
	time_t time=0;
	int i;

	for (i=70; i<tm->tm_year; i++) {
		time += leap_year(i) ?
366 : 365; } for (i=0; i<tm->tm_mon; ++i) { time += days_per_month[leap_year(tm->tm_year)][i]; } time += tm->tm_mday - 1; time *= 24; time += tm->tm_hour; time *= 60; time += tm->tm_min; time *= 60; time += tm->tm_sec; return time; } #elif defined(WIN32) static time_t gf_mktime_utc(struct tm *tm) { return _mkgmtime(tm); } #elif defined(GPAC_ANDROID) #include <time64.h> #if defined(__LP64__) static time_t gf_mktime_utc(struct tm *tm) { return timegm64(tm); } #else static time_t gf_mktime_utc(struct tm *tm) { static const time_t kTimeMax = ~(1L << (sizeof(time_t) * CHAR_BIT - 1)); static const time_t kTimeMin = (1L << (sizeof(time_t) * CHAR_BIT - 1)); time64_t result = timegm64(tm); if (result < kTimeMin || result > kTimeMax) return -1; return result; } #endif #else static time_t gf_mktime_utc(struct tm *tm) { return timegm(tm); } #endif GF_EXPORT u64 gf_net_parse_date(const char *val) { u64 current_time; char szDay[50], szMonth[50]; u32 year, month, day, h, m, s, ms; s32 oh, om; Float secs; Bool neg_time_zone = GF_FALSE; #ifdef _WIN32_WCE SYSTEMTIME syst; FILETIME filet; #else struct tm t; memset(&t, 0, sizeof(struct tm)); #endif szDay[0] = szMonth[0] = 0; year = month = day = h = m = s = 0; oh = om = 0; secs = 0; if (sscanf(val, "%d-%d-%dT%d:%d:%gZ", &year, &month, &day, &h, &m, &secs) == 6) { } else if (sscanf(val, "%d-%d-%dT%d:%d:%g-%d:%d", &year, &month, &day, &h, &m, &secs, &oh, &om) == 8) { neg_time_zone = GF_TRUE; } else if (sscanf(val, "%d-%d-%dT%d:%d:%g+%d:%d", &year, &month, &day, &h, &m, &secs, &oh, &om) == 8) { } else if (sscanf(val, "%3s, %d %3s %d %d:%d:%d", szDay, &day, szMonth, &year, &h, &m, &s)==7) { secs = (Float) s; } else if (sscanf(val, "%9s, %d-%3s-%d %02d:%02d:%02d GMT", szDay, &day, szMonth, &year, &h, &m, &s)==7) { secs = (Float) s; } else if (sscanf(val, "%3s %3s %d %02d:%02d:%02d %d", szDay, szMonth, &day, &year, &h, &m, &s)==7) { secs = (Float) s; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[Core] Cannot parse date string %s\n", val)); return 0; } if (month) { month -= 1; } else { if (!strcmp(szMonth, "Jan")) month = 0; else if (!strcmp(szMonth, "Feb")) month = 1; else if (!strcmp(szMonth, "Mar")) month = 2; else if (!strcmp(szMonth, "Apr")) month = 3; else if (!strcmp(szMonth, "May")) month = 4; else if (!strcmp(szMonth, "Jun")) month = 5; else if (!strcmp(szMonth, "Jul")) month = 6; else if (!strcmp(szMonth, "Aug")) month = 7; else if (!strcmp(szMonth, "Sep")) month = 8; else if (!strcmp(szMonth, "Oct")) month = 9; else if (!strcmp(szMonth, "Nov")) month = 10; else if (!strcmp(szMonth, "Dec")) month = 11; } #ifdef _WIN32_WCE memset(&syst, 0, sizeof(SYSTEMTIME)); syst.wYear = year; syst.wMonth = month + 1; syst.wDay = day; syst.wHour = h; syst.wMinute = m; syst.wSecond = (u32) secs; SystemTimeToFileTime(&syst, &filet); current_time = (u64) ((*(LONGLONG *) &filet - TIMESPEC_TO_FILETIME_OFFSET) / 10000000); #else t.tm_year = year>1000 ? 
year-1900 : year; t.tm_mday = day; t.tm_hour = h; t.tm_min = m; t.tm_sec = (u32) secs; t.tm_mon = month; if (strlen(szDay) ) { if (!strcmp(szDay, "Mon") || !strcmp(szDay, "Monday")) t.tm_wday = 0; else if (!strcmp(szDay, "Tue") || !strcmp(szDay, "Tuesday")) t.tm_wday = 1; else if (!strcmp(szDay, "Wed") || !strcmp(szDay, "Wednesday")) t.tm_wday = 2; else if (!strcmp(szDay, "Thu") || !strcmp(szDay, "Thursday")) t.tm_wday = 3; else if (!strcmp(szDay, "Fri") || !strcmp(szDay, "Friday")) t.tm_wday = 4; else if (!strcmp(szDay, "Sat") || !strcmp(szDay, "Saturday")) t.tm_wday = 5; else if (!strcmp(szDay, "Sun") || !strcmp(szDay, "Sunday")) t.tm_wday = 6; } current_time = gf_mktime_utc(&t); if ((s64) current_time == -1) { //use 1 ms return 1; } if (current_time == 0) { //use 1 ms return 1; } #endif if (om || oh) { s32 diff = (60*oh + om)*60; if (neg_time_zone) diff = -diff; current_time = current_time + diff; } current_time *= 1000; ms = (u32) ( (secs - (u32) secs) * 1000); return current_time + ms; } GF_EXPORT u64 gf_net_get_utc() { u64 current_time; Double msec; u32 sec, frac; gf_net_get_ntp(&sec, &frac); current_time = sec - GF_NTP_SEC_1900_TO_1970; current_time *= 1000; msec = frac*1000.0; msec /= 0xFFFFFFFF; current_time += (u64) msec; return current_time; } GF_EXPORT GF_Err gf_bin128_parse(const char *string, bin128 value) { u32 len; u32 i=0; if (!strnicmp(string, "0x", 2)) string += 2; len = (u32) strlen(string); if (len >= 32) { u32 j; for (j=0; j<len; j+=2) { u32 v; char szV[5]; while (string[j] && !isalnum(string[j])) j++; if (!string[j]) break; sprintf(szV, "%c%c", string[j], string[j+1]); sscanf(szV, "%x", &v); value[i] = v; i++; if (i > 15) { // force error check below i++; break; } } } if (i != 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CORE, ("[CORE] 128bit blob is not 16-bytes long: %s\n", string)); return GF_BAD_PARAM; } return GF_OK; }
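/*
 * Illustrative sketch (not part of the original file): exercising the
 * date and 128-bit blob parsers defined above. The function name and
 * sample inputs are arbitrary; gf_net_parse_date() returns UTC
 * milliseconds since 1970.
 */
#if 0
static void example_parse_helpers()
{
	bin128 id;
	u64 utc_ms = gf_net_parse_date("2012-04-01T10:30:00Z");
	if (gf_bin128_parse("0x000102030405060708090A0B0C0D0E0F", id) == GF_OK) {
		/* id now holds the 16 decoded bytes, id[0]==0x00 .. id[15]==0x0F */
	}
	(void) utc_ms;
}
#endif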
/* $Id$ */ #include <stdio.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "gd_intern.h" /* 2.03: don't include zlib here or we can't build without PNG */ #include "gd.h" #include "gdhelpers.h" #include "gd_color.h" #include "gd_errors.h" /* 2.0.12: this now checks the clipping rectangle */ #define gdImageBoundsSafeMacro(im, x, y) (!((((y) < (im)->cy1) || ((y) > (im)->cy2)) || (((x) < (im)->cx1) || ((x) > (im)->cx2)))) #ifdef _OSD_POSIX /* BS2000 uses the EBCDIC char set instead of ASCII */ #define CHARSET_EBCDIC #define __attribute__(any) /*nothing */ #endif /*_OSD_POSIX*/ #ifndef CHARSET_EBCDIC #define ASC(ch) ch #else /*CHARSET_EBCDIC */ #define ASC(ch) gd_toascii[(unsigned char)ch] static const unsigned char gd_toascii[256] = { /*00 */ 0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f, 0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /*................ */ /*10 */ 0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97, 0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f, /*................ */ /*20 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, /*................ */ /*30 */ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04, 0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, /*................ */ /*40 */ 0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5, 0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, /* .........`.<(+| */ /*50 */ 0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef, 0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f, /*&.........!$*);. */ /*60 */ 0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5, 0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f, /*-/........^,%_>?*/ /*70 */ 0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf, 0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, /*..........:#@'=" */ /*80 */ 0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1, /*.abcdefghi...... */ /*90 */ 0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4, /*.jklmnopqr...... */ /*a0 */ 0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae, /*..stuvwxyz...... */ /*b0 */ 0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc, 0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7, /*...........[\].. */ /*c0 */ 0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5, /*.ABCDEFGHI...... */ /*d0 */ 0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff, /*.JKLMNOPQR...... */ /*e0 */ 0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5, /*..STUVWXYZ...... */ /*f0 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e /*0123456789.{.}.~ */ }; #endif /*CHARSET_EBCDIC */ extern const int gdCosT[]; extern const int gdSinT[]; void gd_stderr_error(int priority, const char *format, va_list args) { switch (priority) { case GD_ERROR: fputs("GD Error: ", stderr); break; case GD_WARNING: fputs("GD Warning: ", stderr); break; case GD_NOTICE: fputs("GD Notice: ", stderr); break; case GD_INFO: fputs("GD Info: ", stderr); break; case GD_DEBUG: fputs("GD Debug: ", stderr); break; } vfprintf(stderr, format, args); fflush(stderr); } static gdErrorMethod gd_error_method = gd_stderr_error; void gd_error(const char *format, ...) 
{ va_list args; va_start(args, format); gd_error_ex(GD_WARNING, format, args); va_end(args); } void gd_error_ex(int priority, const char *format, ...) { va_list args; va_start(args, format); if (gd_error_method) { gd_error_method(priority, format, args); } va_end(args); } BGD_DECLARE(void) gdSetErrorMethod(gdErrorMethod error_method) { gd_error_method = error_method; } BGD_DECLARE(void) gdClearErrorMethod(void) { gd_error_method = gd_stderr_error; } static void gdImageBrushApply (gdImagePtr im, int x, int y); static void gdImageTileApply (gdImagePtr im, int x, int y); BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y); /* Function: gdImageCreate gdImageCreate is called to create palette-based images, with no more than 256 colors. The image must eventually be destroyed using gdImageDestroy(). Parameters: sx - The image width. sy - The image height. Returns: A pointer to the new image or NULL if an error occurred. Example: > gdImagePtr im; > im = gdImageCreate(64, 64); > // ... Use the image ... > gdImageDestroy(im); See Also: <gdImageCreateTrueColor> */ BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy) { int i; gdImagePtr im; if (overflow2(sizeof (unsigned char *), sy)) { return NULL; } if (overflow2(sizeof (unsigned char *), sx)) { return NULL; } im = (gdImage *) gdCalloc(1, sizeof(gdImage)); if (!im) { return NULL; } /* Row-major ever since gd 1.3 */ im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy); if (!im->pixels) { gdFree(im); return NULL; } im->polyInts = 0; im->polyAllocated = 0; im->brush = 0; im->tile = 0; im->style = 0; for (i = 0; (i < sy); i++) { /* Row-major ever since gd 1.3 */ im->pixels[i] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char)); if (!im->pixels[i]) { for (--i ; i >= 0; i--) { gdFree(im->pixels[i]); } gdFree(im->pixels); gdFree(im); return NULL; } } im->sx = sx; im->sy = sy; im->colorsTotal = 0; im->transparent = (-1); im->interlace = 0; im->thick = 1; im->AA = 0; for (i = 0; (i < gdMaxColors); i++) { im->open[i] = 1; }; im->trueColor = 0; im->tpixels = 0; im->cx1 = 0; im->cy1 = 0; im->cx2 = im->sx - 1; im->cy2 = im->sy - 1; im->res_x = GD_RESOLUTION; im->res_y = GD_RESOLUTION; im->interpolation = NULL; im->interpolation_id = GD_BILINEAR_FIXED; return im; } /* Function: gdImageCreateTrueColor <gdImageCreateTrueColor> is called to create truecolor images, with an essentially unlimited number of colors. Invoke <gdImageCreateTrueColor> with the x and y dimensions of the desired image. <gdImageCreateTrueColor> returns a <gdImagePtr> to the new image, or NULL if unable to allocate the image. The image must eventually be destroyed using <gdImageDestroy>(). Truecolor images are always filled with black at creation time. There is no concept of a "background" color index. Parameters: sx - The image width. sy - The image height. Returns: A pointer to the new image or NULL if an error occurred. Example: > gdImagePtr im; > im = gdImageCreateTrueColor(64, 64); > // ... Use the image ... 
> gdImageDestroy(im);

  See Also:
    <gdImageCreate>

*/
BGD_DECLARE(gdImagePtr) gdImageCreateTrueColor (int sx, int sy)
{
	int i;
	gdImagePtr im;

	if (overflow2(sx, sy)) {
		return NULL;
	}

	if (overflow2(sizeof (int *), sy)) {
		return 0;
	}

	if (overflow2(sizeof(int), sx)) {
		return NULL;
	}

	im = (gdImage *) gdMalloc (sizeof (gdImage));
	if (!im) {
		return 0;
	}
	memset (im, 0, sizeof (gdImage));
	im->tpixels = (int **) gdMalloc (sizeof (int *) * sy);
	if (!im->tpixels) {
		gdFree(im);
		return 0;
	}
	im->polyInts = 0;
	im->polyAllocated = 0;
	im->brush = 0;
	im->tile = 0;
	im->style = 0;
	for (i = 0; (i < sy); i++) {
		im->tpixels[i] = (int *) gdCalloc (sx, sizeof (int));
		if (!im->tpixels[i]) {
			/* 2.0.34 */
			i--;
			while (i >= 0) {
				gdFree(im->tpixels[i]);
				i--;
			}
			gdFree(im->tpixels);
			gdFree(im);
			return 0;
		}
	}
	im->sx = sx;
	im->sy = sy;
	im->transparent = (-1);
	im->interlace = 0;
	im->trueColor = 1;
	/* 2.0.2: alpha blending is now on by default, and saving of
	   alpha is off by default. This allows font antialiasing to
	   work as expected on the first try in JPEGs -- quite
	   important -- and also allows for smaller PNGs when saving
	   of alpha channel is not really desired, which it usually
	   isn't! */
	im->saveAlphaFlag = 0;
	im->alphaBlendingFlag = 1;
	im->thick = 1;
	im->AA = 0;
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}

/*
  Function: gdImageDestroy

    <gdImageDestroy> is used to free the memory associated with an
    image. It is important to invoke <gdImageDestroy> before exiting
    your program or assigning a new image to a <gdImagePtr> variable.

  Parameters:

    im  - Pointer to the gdImage to delete.

  Returns:

    Nothing.

  Example:
    > gdImagePtr im;
    > im = gdImageCreate(10, 10);
    > // ... Use the image ...
    > // Now destroy it
    > gdImageDestroy(im);

*/
BGD_DECLARE(void) gdImageDestroy (gdImagePtr im)
{
	int i;
	if (im->pixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->pixels[i]);
		}
		gdFree (im->pixels);
	}
	if (im->tpixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->tpixels[i]);
		}
		gdFree (im->tpixels);
	}
	if (im->polyInts) {
		gdFree (im->polyInts);
	}
	if (im->style) {
		gdFree (im->style);
	}
	gdFree (im);
}

BGD_DECLARE(int) gdImageColorClosest (gdImagePtr im, int r, int g, int b)
{
	return gdImageColorClosestAlpha (im, r, g, b, gdAlphaOpaque);
}

BGD_DECLARE(int) gdImageColorClosestAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int i;
	long rd, gd, bd, ad;
	int ct = (-1);
	int first = 1;
	long mindist = 0;
	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (i = 0; (i < (im->colorsTotal)); i++) {
		long dist;
		if (im->open[i]) {
			continue;
		}
		rd = (im->red[i] - r);
		gd = (im->green[i] - g);
		bd = (im->blue[i] - b);
		/* gd 2.02: whoops, was - b (thanks to David Marwood) */
		/* gd 2.16: was blue rather than alpha! Geez! Thanks to Artur Jakub Jerzak */
		ad = (im->alpha[i] - a);
		dist = rd * rd + gd * gd + bd * bd + ad * ad;
		if (first || (dist < mindist)) {
			mindist = dist;
			ct = i;
			first = 0;
		}
	}
	return ct;
}

/* This code is taken from http://www.acm.org/jgt/papers/SmithLyons96/hwb_rgb.html, an article
 * on colour conversion to/from RGB and HWB colour systems.
 * It has been modified to return the converted value as a
 * parameter.
*/ #define RETURN_HWB(h, w, b) {HWB->H = h; HWB->W = w; HWB->B = b; return HWB;} #define RETURN_RGB(r, g, b) {RGB->R = r; RGB->G = g; RGB->B = b; return RGB;} #define HWB_UNDEFINED -1 #define SETUP_RGB(s, r, g, b) {s.R = r/255.0; s.G = g/255.0; s.B = b/255.0;} #define MIN(a,b) ((a)<(b)?(a):(b)) #define MIN3(a,b,c) ((a)<(b)?(MIN(a,c)):(MIN(b,c))) #define MAX(a,b) ((a)<(b)?(b):(a)) #define MAX3(a,b,c) ((a)<(b)?(MAX(b,c)):(MAX(a,c))) /* * Theoretically, hue 0 (pure red) is identical to hue 6 in these transforms. Pure * red always maps to 6 in this implementation. Therefore UNDEFINED can be * defined as 0 in situations where only unsigned numbers are desired. */ typedef struct { float R, G, B; } RGBType; typedef struct { float H, W, B; } HWBType; static HWBType * RGB_to_HWB (RGBType RGB, HWBType * HWB) { /* * RGB are each on [0, 1]. W and B are returned on [0, 1] and H is * returned on [0, 6]. Exception: H is returned UNDEFINED if W == 1 - B. */ float R = RGB.R, G = RGB.G, B = RGB.B, w, v, b, f; int i; w = MIN3 (R, G, B); v = MAX3 (R, G, B); b = 1 - v; if (v == w) RETURN_HWB (HWB_UNDEFINED, w, b); f = (R == w) ? G - B : ((G == w) ? B - R : R - G); i = (R == w) ? 3 : ((G == w) ? 5 : 1); RETURN_HWB (i - f / (v - w), w, b); } static float HWB_Diff (int r1, int g1, int b1, int r2, int g2, int b2) { RGBType RGB1, RGB2; HWBType HWB1, HWB2; float diff; SETUP_RGB (RGB1, r1, g1, b1); SETUP_RGB (RGB2, r2, g2, b2); RGB_to_HWB (RGB1, &HWB1); RGB_to_HWB (RGB2, &HWB2); /* * I made this bit up; it seems to produce OK results, and it is certainly * more visually correct than the current RGB metric. (PJW) */ if ((HWB1.H == HWB_UNDEFINED) || (HWB2.H == HWB_UNDEFINED)) { diff = 0; /* Undefined hues always match... */ } else { diff = fabs (HWB1.H - HWB2.H); if (diff > 3) { diff = 6 - diff; /* Remember, it's a colour circle */ } } diff = diff * diff + (HWB1.W - HWB2.W) * (HWB1.W - HWB2.W) + (HWB1.B - HWB2.B) * (HWB1.B - HWB2.B); return diff; } #if 0 /* * This is not actually used, but is here for completeness, in case someone wants to * use the HWB stuff for anything else... */ static RGBType * HWB_to_RGB (HWBType HWB, RGBType * RGB) { /* * H is given on [0, 6] or UNDEFINED. W and B are given on [0, 1]. * RGB are each returned on [0, 1]. 
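	 * Worked example: H = 6, W = 0, B = 0 (pure red) gives v = 1, i = 6,
	 * f = 0, n = 0, and the switch below returns (v, n, w) = (1, 0, 0) --
	 * the round trip of the RGB_to_HWB example above.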
*/ float h = HWB.H, w = HWB.W, b = HWB.B, v, n, f; int i; v = 1 - b; if (h == HWB_UNDEFINED) RETURN_RGB (v, v, v); i = floor (h); f = h - i; if (i & 1) f = 1 - f; /* if i is odd */ n = w + f * (v - w); /* linear interpolation between w and v */ switch (i) { case 6: case 0: RETURN_RGB (v, n, w); case 1: RETURN_RGB (n, v, w); case 2: RETURN_RGB (w, v, n); case 3: RETURN_RGB (w, n, v); case 4: RETURN_RGB (n, w, v); case 5: RETURN_RGB (v, w, n); } return RGB; } #endif BGD_DECLARE(int) gdImageColorClosestHWB (gdImagePtr im, int r, int g, int b) { int i; /* long rd, gd, bd; */ int ct = (-1); int first = 1; float mindist = 0; if (im->trueColor) { return gdTrueColor (r, g, b); } for (i = 0; (i < (im->colorsTotal)); i++) { float dist; if (im->open[i]) { continue; } dist = HWB_Diff (im->red[i], im->green[i], im->blue[i], r, g, b); if (first || (dist < mindist)) { mindist = dist; ct = i; first = 0; } } return ct; } BGD_DECLARE(int) gdImageColorExact (gdImagePtr im, int r, int g, int b) { return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque); } BGD_DECLARE(int) gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a) { int i; if (im->trueColor) { return gdTrueColorAlpha (r, g, b, a); } for (i = 0; (i < (im->colorsTotal)); i++) { if (im->open[i]) { continue; } if ((im->red[i] == r) && (im->green[i] == g) && (im->blue[i] == b) && (im->alpha[i] == a)) { return i; } } return -1; } BGD_DECLARE(int) gdImageColorAllocate (gdImagePtr im, int r, int g, int b) { return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque); } BGD_DECLARE(int) gdImageColorAllocateAlpha (gdImagePtr im, int r, int g, int b, int a) { int i; int ct = (-1); if (im->trueColor) { return gdTrueColorAlpha (r, g, b, a); } for (i = 0; (i < (im->colorsTotal)); i++) { if (im->open[i]) { ct = i; break; } } if (ct == (-1)) { ct = im->colorsTotal; if (ct == gdMaxColors) { return -1; } im->colorsTotal++; } im->red[ct] = r; im->green[ct] = g; im->blue[ct] = b; im->alpha[ct] = a; im->open[ct] = 0; return ct; } /* * gdImageColorResolve is an alternative for the code fragment: * * if ((color=gdImageColorExact(im,R,G,B)) < 0) * if ((color=gdImageColorAllocate(im,R,G,B)) < 0) * color=gdImageColorClosest(im,R,G,B); * * in a single function. Its advantage is that it is guaranteed to * return a color index in one search over the color table. */ BGD_DECLARE(int) gdImageColorResolve (gdImagePtr im, int r, int g, int b) { return gdImageColorResolveAlpha (im, r, g, b, gdAlphaOpaque); } BGD_DECLARE(int) gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a) { int c; int ct = -1; int op = -1; long rd, gd, bd, ad, dist; long mindist = 4 * 255 * 255; /* init to max poss dist */ if (im->trueColor) { return gdTrueColorAlpha (r, g, b, a); } for (c = 0; c < im->colorsTotal; c++) { if (im->open[c]) { op = c; /* Save open slot */ continue; /* Color not in use */ } if (c == im->transparent) { /* don't ever resolve to the color that has * been designated as the transparent color */ continue; } rd = (long) (im->red[c] - r); gd = (long) (im->green[c] - g); bd = (long) (im->blue[c] - b); ad = (long) (im->alpha[c] - a); dist = rd * rd + gd * gd + bd * bd + ad * ad; if (dist < mindist) { if (dist == 0) { return c; /* Return exact match color */ } mindist = dist; ct = c; } } /* no exact match. 
We now know closest, but first try to allocate exact */ if (op == -1) { op = im->colorsTotal; if (op == gdMaxColors) { /* No room for more colors */ return ct; /* Return closest available color */ } im->colorsTotal++; } im->red[op] = r; im->green[op] = g; im->blue[op] = b; im->alpha[op] = a; im->open[op] = 0; return op; /* Return newly allocated color */ } BGD_DECLARE(void) gdImageColorDeallocate (gdImagePtr im, int color) { if (im->trueColor || (color >= gdMaxColors) || (color < 0)) { return; } /* Mark it open. */ im->open[color] = 1; } BGD_DECLARE(void) gdImageColorTransparent (gdImagePtr im, int color) { if (!im->trueColor) { if((color < -1) || (color >= gdMaxColors)) { return; } if (im->transparent != -1) { im->alpha[im->transparent] = gdAlphaOpaque; } if (color != -1) { im->alpha[color] = gdAlphaTransparent; } } im->transparent = color; } BGD_DECLARE(void) gdImagePaletteCopy (gdImagePtr to, gdImagePtr from) { int i; int x, y, p; int xlate[256]; if (to->trueColor) { return; } if (from->trueColor) { return; } for (i = 0; i < 256; i++) { xlate[i] = -1; }; for (y = 0; y < (to->sy); y++) { for (x = 0; x < (to->sx); x++) { /* Optimization: no gdImageGetPixel */ p = to->pixels[y][x]; if (xlate[p] == -1) { /* This ought to use HWB, but we don't have an alpha-aware version of that yet. */ xlate[p] = gdImageColorClosestAlpha (from, to->red[p], to->green[p], to->blue[p], to->alpha[p]); /*printf("Mapping %d (%d, %d, %d, %d) to %d (%d, %d, %d, %d)\n", */ /* p, to->red[p], to->green[p], to->blue[p], to->alpha[p], */ /* xlate[p], from->red[xlate[p]], from->green[xlate[p]], from->blue[xlate[p]], from->alpha[xlate[p]]); */ }; /* Optimization: no gdImageSetPixel */ to->pixels[y][x] = xlate[p]; }; }; for (i = 0; (i < (from->colorsTotal)); i++) { /*printf("Copying color %d (%d, %d, %d, %d)\n", i, from->red[i], from->blue[i], from->green[i], from->alpha[i]); */ to->red[i] = from->red[i]; to->blue[i] = from->blue[i]; to->green[i] = from->green[i]; to->alpha[i] = from->alpha[i]; to->open[i] = 0; }; for (i = from->colorsTotal; (i < to->colorsTotal); i++) { to->open[i] = 1; }; to->colorsTotal = from->colorsTotal; } BGD_DECLARE(int) gdImageColorReplace (gdImagePtr im, int src, int dst) { register int x, y; int n = 0; if (src == dst) { return 0; } #define REPLACING_LOOP(pixel) do { \ for (y = im->cy1; y <= im->cy2; y++) { \ for (x = im->cx1; x <= im->cx2; x++) { \ if (pixel(im, x, y) == src) { \ gdImageSetPixel(im, x, y, dst); \ n++; \ } \ } \ } \ } while (0) if (im->trueColor) { REPLACING_LOOP(gdImageTrueColorPixel); } else { REPLACING_LOOP(gdImagePalettePixel); } #undef REPLACING_LOOP return n; } BGD_DECLARE(int) gdImageColorReplaceThreshold (gdImagePtr im, int src, int dst, float threshold) { register int x, y; int n = 0; if (src == dst) { return 0; } #define REPLACING_LOOP(pixel) do { \ for (y = im->cy1; y <= im->cy2; y++) { \ for (x = im->cx1; x <= im->cx2; x++) { \ if (gdColorMatch(im, src, pixel(im, x, y), threshold)) { \ gdImageSetPixel(im, x, y, dst); \ n++; \ } \ } \ } \ } while (0) if (im->trueColor) { REPLACING_LOOP(gdImageTrueColorPixel); } else { REPLACING_LOOP(gdImagePalettePixel); } #undef REPLACING_LOOP return n; } static int colorCmp (const void *x, const void *y) { int a = *(int const *)x; int b = *(int const *)y; return (a > b) - (a < b); } BGD_DECLARE(int) gdImageColorReplaceArray (gdImagePtr im, int len, int *src, int *dst) { register int x, y; int c, *d, *base; int i, n = 0; if (len <= 0 || src == dst) { return 0; } if (len == 1) { return gdImageColorReplace(im, src[0], dst[0]); } if 
(overflow2(len, sizeof(int)<<1)) { return -1; } base = (int *)gdMalloc(len * (sizeof(int)<<1)); if (!base) { return -1; } for (i = 0; i < len; i++) { base[(i<<1)] = src[i]; base[(i<<1)+1] = dst[i]; } qsort(base, len, sizeof(int)<<1, colorCmp); #define REPLACING_LOOP(pixel) do { \ for (y = im->cy1; y <= im->cy2; y++) { \ for (x = im->cx1; x <= im->cx2; x++) { \ c = pixel(im, x, y); \ if ( (d = (int *)bsearch(&c, base, len, sizeof(int)<<1, colorCmp)) ) { \ gdImageSetPixel(im, x, y, d[1]); \ n++; \ } \ } \ } \ } while (0) if (im->trueColor) { REPLACING_LOOP(gdImageTrueColorPixel); } else { REPLACING_LOOP(gdImagePalettePixel); } #undef REPLACING_LOOP gdFree(base); return n; } BGD_DECLARE(int) gdImageColorReplaceCallback (gdImagePtr im, gdCallbackImageColor callback) { int c, d, n = 0; if (!callback) { return 0; } if (im->trueColor) { register int x, y; for (y = im->cy1; y <= im->cy2; y++) { for (x = im->cx1; x <= im->cx2; x++) { c = gdImageTrueColorPixel(im, x, y); if ( (d = callback(im, c)) != c) { gdImageSetPixel(im, x, y, d); n++; } } } } else { /* palette */ int *sarr, *darr; int k, len = 0; sarr = (int *)gdCalloc(im->colorsTotal, sizeof(int)); if (!sarr) { return -1; } for (c = 0; c < im->colorsTotal; c++) { if (!im->open[c]) { sarr[len++] = c; } } darr = (int *)gdCalloc(len, sizeof(int)); if (!darr) { gdFree(sarr); return -1; } for (k = 0; k < len; k++) { darr[k] = callback(im, sarr[k]); } n = gdImageColorReplaceArray(im, k, sarr, darr); gdFree(darr); gdFree(sarr); } return n; } /* 2.0.10: before the drawing routines, some code to clip points that are * outside the drawing window. Nick Atty (nick@canalplan.org.uk) * * This is the Sutherland Hodgman Algorithm, as implemented by * Duvanenko, Robbins and Gyurcsik - SH(DRG) for short. See Dr Dobb's * Journal, January 1996, pp107-110 and 116-117 * * Given the end points of a line, and a bounding rectangle (which we * know to be from (0,0) to (SX,SY)), adjust the endpoints to be on * the edges of the rectangle if the line should be drawn at all, * otherwise return a failure code */ /* this does "one-dimensional" clipping: note that the second time it is called, all the x parameters refer to height and the y to width - the comments ignore this (if you can understand it when it's looking at the X parameters, it should become clear what happens on the second call!) The code is simplified from that in the article, as we know that gd images always start at (0,0) */ /* 2.0.26, TBB: we now have to respect a clipping rectangle, it won't necessarily start at 0. 
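
   For example (hypothetical values, clipping x against [0, 9]):

   >   int x0 = -5, y0 = 0, x1 = 5, y1 = 10;
   >   if (clip_1d (&x0, &y0, &x1, &y1, 0, 9)) {
   >       // (x0, y0) is now (0, 5): the slope is 1, so moving the start
   >       // from x = -5 to the boundary at x = 0 lifts y0 from 0 to 5
   >   }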
*/ static int clip_1d (int *x0, int *y0, int *x1, int *y1, int mindim, int maxdim) { double m; /* gradient of line */ if (*x0 < mindim) { /* start of line is left of window */ if (*x1 < mindim) /* as is the end, so the line never cuts the window */ return 0; m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */ /* adjust x0 to be on the left boundary (ie to be zero), and y0 to match */ *y0 -= (int)(m * (*x0 - mindim)); *x0 = mindim; /* now, perhaps, adjust the far end of the line as well */ if (*x1 > maxdim) { *y1 += m * (maxdim - *x1); *x1 = maxdim; } return 1; } if (*x0 > maxdim) { /* start of line is right of window - complement of above */ if (*x1 > maxdim) /* as is the end, so the line misses the window */ return 0; m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */ *y0 += (int)(m * (maxdim - *x0)); /* adjust so point is on the right boundary */ *x0 = maxdim; /* now, perhaps, adjust the end of the line */ if (*x1 < mindim) { *y1 -= (int)(m * (*x1 - mindim)); *x1 = mindim; } return 1; } /* the final case - the start of the line is inside the window */ if (*x1 > maxdim) { /* other end is outside to the right */ m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */ *y1 += (int)(m * (maxdim - *x1)); *x1 = maxdim; return 1; } if (*x1 < mindim) { /* other end is outside to the left */ m = (*y1 - *y0) / (double) (*x1 - *x0); /* calculate the slope of the line */ *y1 -= (int)(m * (*x1 - mindim)); *x1 = mindim; return 1; } /* only get here if both points are inside the window */ return 1; } /* end of line clipping code */ BGD_DECLARE(void) gdImageSetPixel (gdImagePtr im, int x, int y, int color) { int p; switch (color) { case gdStyled: if (!im->style) { /* Refuse to draw if no style is set. */ return; } else { p = im->style[im->stylePos++]; } if (p != (gdTransparent)) { gdImageSetPixel (im, x, y, p); } im->stylePos = im->stylePos % im->styleLength; break; case gdStyledBrushed: if (!im->style) { /* Refuse to draw if no style is set. */ return; } p = im->style[im->stylePos++]; if ((p != gdTransparent) && (p != 0)) { gdImageSetPixel (im, x, y, gdBrushed); } im->stylePos = im->stylePos % im->styleLength; break; case gdBrushed: gdImageBrushApply (im, x, y); break; case gdTiled: gdImageTileApply (im, x, y); break; case gdAntiAliased: /* This shouldn't happen (2.0.26) because we just call gdImageAALine now, but do something sane. 
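		   The sane thing: plot a single solid pixel in im->AA_color, the
		   foreground colour registered by gdImageSetAntiAliased().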
*/ gdImageSetPixel(im, x, y, im->AA_color); break; default: if (gdImageBoundsSafeMacro (im, x, y)) { if (im->trueColor) { switch (im->alphaBlendingFlag) { default: case gdEffectReplace: im->tpixels[y][x] = color; break; case gdEffectAlphaBlend: case gdEffectNormal: im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color); break; case gdEffectOverlay : im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color); break; case gdEffectMultiply : im->tpixels[y][x] = gdLayerMultiply(im->tpixels[y][x], color); break; } } else { im->pixels[y][x] = color; } } break; } } static void gdImageBrushApply (gdImagePtr im, int x, int y) { int lx, ly; int hy; int hx; int x1, y1, x2, y2; int srcx, srcy; if (!im->brush) { return; } hy = gdImageSY (im->brush) / 2; y1 = y - hy; y2 = y1 + gdImageSY (im->brush); hx = gdImageSX (im->brush) / 2; x1 = x - hx; x2 = x1 + gdImageSX (im->brush); srcy = 0; if (im->trueColor) { if (im->brush->trueColor) { for (ly = y1; (ly < y2); ly++) { srcx = 0; for (lx = x1; (lx < x2); lx++) { int p; p = gdImageGetTrueColorPixel (im->brush, srcx, srcy); /* 2.0.9, Thomas Winzig: apply simple full transparency */ if (p != gdImageGetTransparent (im->brush)) { gdImageSetPixel (im, lx, ly, p); } srcx++; } srcy++; } } else { /* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger for pointing out the issue) */ for (ly = y1; (ly < y2); ly++) { srcx = 0; for (lx = x1; (lx < x2); lx++) { int p, tc; p = gdImageGetPixel (im->brush, srcx, srcy); tc = gdImageGetTrueColorPixel (im->brush, srcx, srcy); /* 2.0.9, Thomas Winzig: apply simple full transparency */ if (p != gdImageGetTransparent (im->brush)) { gdImageSetPixel (im, lx, ly, tc); } srcx++; } srcy++; } } } else { for (ly = y1; (ly < y2); ly++) { srcx = 0; for (lx = x1; (lx < x2); lx++) { int p; p = gdImageGetPixel (im->brush, srcx, srcy); /* Allow for non-square brushes! */ if (p != gdImageGetTransparent (im->brush)) { /* Truecolor brush. Very slow on a palette destination. */ if (im->brush->trueColor) { gdImageSetPixel (im, lx, ly, gdImageColorResolveAlpha (im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p))); } else { gdImageSetPixel (im, lx, ly, im->brushColorMap[p]); } } srcx++; } srcy++; } } } static void gdImageTileApply (gdImagePtr im, int x, int y) { gdImagePtr tile = im->tile; int srcx, srcy; int p; if (!tile) { return; } srcx = x % gdImageSX (tile); srcy = y % gdImageSY (tile); if (im->trueColor) { p = gdImageGetPixel (tile, srcx, srcy); if (p != gdImageGetTransparent (tile)) { if (!tile->trueColor) { p = gdTrueColorAlpha(tile->red[p], tile->green[p], tile->blue[p], tile->alpha[p]); } gdImageSetPixel (im, x, y, p); } } else { p = gdImageGetPixel (tile, srcx, srcy); /* Allow for transparency */ if (p != gdImageGetTransparent (tile)) { if (tile->trueColor) { /* Truecolor tile. Very slow on a palette destination. */ gdImageSetPixel (im, x, y, gdImageColorResolveAlpha (im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p))); } else { gdImageSetPixel (im, x, y, im->tileColorMap[p]); } } } } BGD_DECLARE(int) gdImageGetPixel (gdImagePtr im, int x, int y) { if (gdImageBoundsSafeMacro (im, x, y)) { if (im->trueColor) { return im->tpixels[y][x]; } else { return im->pixels[y][x]; } } else { return 0; } } BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y) { int p = gdImageGetPixel (im, x, y); if (!im->trueColor) { return gdTrueColorAlpha (im->red[p], im->green[p], im->blue[p], (im->transparent == p) ? 
gdAlphaTransparent : im->alpha[p]); } else { return p; } } BGD_DECLARE(void) gdImageAABlend (gdImagePtr im) { /* NO-OP, kept for library compatibility. */ (void)im; } static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col); static void gdImageHLine(gdImagePtr im, int y, int x1, int x2, int col) { if (im->thick > 1) { int thickhalf = im->thick >> 1; gdImageFilledRectangle(im, x1, y - thickhalf, x2, y + im->thick - thickhalf - 1, col); } else { if (x2 < x1) { int t = x2; x2 = x1; x1 = t; } for (; x1 <= x2; x1++) { gdImageSetPixel(im, x1, y, col); } } return; } static void gdImageVLine(gdImagePtr im, int x, int y1, int y2, int col) { if (im->thick > 1) { int thickhalf = im->thick >> 1; gdImageFilledRectangle(im, x - thickhalf, y1, x + im->thick - thickhalf - 1, y2, col); } else { if (y2 < y1) { int t = y1; y1 = y2; y2 = t; } for (; y1 <= y2; y1++) { gdImageSetPixel(im, x, y1, col); } } return; } /* Bresenham as presented in Foley & Van Dam */ BGD_DECLARE(void) gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag; int wid; int w, wstart; int thick; if (color == gdAntiAliased) { /* gdAntiAliased passed as color: use the much faster, much cheaper and equally attractive gdImageAALine implementation. That clips too, so don't clip twice. */ gdImageAALine(im, x1, y1, x2, y2, im->AA_color); return; } /* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn. 2.0.26, TBB: clip to edges of clipping rectangle. We were getting away with this because gdImageSetPixel is used for actual drawing, but this is still more efficient and opens the way to skip per-pixel bounds checking in the future. */ if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0) return; if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0) return; thick = im->thick; dx = abs (x2 - x1); dy = abs (y2 - y1); if (dx == 0) { gdImageVLine(im, x1, y1, y2, color); return; } else if (dy == 0) { gdImageHLine(im, y1, x1, x2, color); return; } if (dy <= dx) { /* More-or-less horizontal. use wid for vertical stroke */ /* Doug Claar: watch out for NaN in atan2 (2.0.5) */ /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double ac = cos (atan2 (dy, dx)); if (ac != 0) { wid = thick / ac; } else { wid = 1; } if (wid == 0) { wid = 1; } d = 2 * dy - dx; incr1 = 2 * dy; incr2 = 2 * (dy - dx); if (x1 > x2) { x = x2; y = y2; ydirflag = (-1); xend = x1; } else { x = x1; y = y1; ydirflag = 1; xend = x2; } /* Set up line thickness */ wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, x, w, color); if (((y2 - y1) * ydirflag) > 0) { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y++; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, x, w, color); } } else { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y--; d += incr2; } wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, x, w, color); } } } else { /* More-or-less vertical. use wid for horizontal stroke */ /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! 
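	   Geometry note: for a line at angle theta from the horizontal, a
	   perpendicular stroke of width `thick` spans thick / sin(theta) pixels
	   horizontally; dy > dx in this branch, so sin(theta) > sqrt(2)/2 and
	   the division cannot blow up, the (as != 0) guard below is just belt
	   and braces.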
*/ double as = sin (atan2 (dy, dx)); if (as != 0) { wid = thick / as; } else { wid = 1; } if (wid == 0) wid = 1; d = 2 * dx - dy; incr1 = 2 * dx; incr2 = 2 * (dx - dy); if (y1 > y2) { y = y2; x = x2; yend = y1; xdirflag = (-1); } else { y = y1; x = x1; yend = y2; xdirflag = 1; } /* Set up line thickness */ wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, w, y, color); if (((x2 - x1) * xdirflag) > 0) { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x++; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, w, y, color); } } else { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x--; d += incr2; } wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, w, y, color); } } } } static void dashedSet (gdImagePtr im, int x, int y, int color, int *onP, int *dashStepP, int wid, int vert); BGD_DECLARE(void) gdImageDashedLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag; int dashStep = 0; int on = 1; int wid; int vert; int thick = im->thick; dx = abs (x2 - x1); dy = abs (y2 - y1); if (dy <= dx) { /* More-or-less horizontal. use wid for vertical stroke */ /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! */ double as = sin (atan2 (dy, dx)); if (as != 0) { wid = thick / as; } else { wid = 1; } vert = 1; d = 2 * dy - dx; incr1 = 2 * dy; incr2 = 2 * (dy - dx); if (x1 > x2) { x = x2; y = y2; ydirflag = (-1); xend = x1; } else { x = x1; y = y1; ydirflag = 1; xend = x2; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); if (((y2 - y1) * ydirflag) > 0) { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y++; d += incr2; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); } } else { while (x < xend) { x++; if (d < 0) { d += incr1; } else { y--; d += incr2; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); } } } else { /* 2.0.12: Michael Schwartz: divide rather than multiply; TBB: but watch out for /0! 
*/ double as = sin (atan2 (dy, dx)); if (as != 0) { wid = thick / as; } else { wid = 1; } vert = 0; d = 2 * dx - dy; incr1 = 2 * dx; incr2 = 2 * (dx - dy); if (y1 > y2) { y = y2; x = x2; yend = y1; xdirflag = (-1); } else { y = y1; x = x1; yend = y2; xdirflag = 1; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); if (((x2 - x1) * xdirflag) > 0) { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x++; d += incr2; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); } } else { while (y < yend) { y++; if (d < 0) { d += incr1; } else { x--; d += incr2; } dashedSet (im, x, y, color, &on, &dashStep, wid, vert); } } } } static void dashedSet (gdImagePtr im, int x, int y, int color, int *onP, int *dashStepP, int wid, int vert) { int dashStep = *dashStepP; int on = *onP; int w, wstart; dashStep++; if (dashStep == gdDashSize) { dashStep = 0; on = !on; } if (on) { if (vert) { wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, x, w, color); } else { wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) gdImageSetPixel (im, w, y, color); } } *dashStepP = dashStep; *onP = on; } BGD_DECLARE(int) gdImageBoundsSafe (gdImagePtr im, int x, int y) { return gdImageBoundsSafeMacro (im, x, y); } BGD_DECLARE(void) gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color) { int cx, cy; int px, py; int fline; cx = 0; cy = 0; #ifdef CHARSET_EBCDIC c = ASC (c); #endif /*CHARSET_EBCDIC */ if ((c < f->offset) || (c >= (f->offset + f->nchars))) { return; } fline = (c - f->offset) * f->h * f->w; for (py = y; (py < (y + f->h)); py++) { for (px = x; (px < (x + f->w)); px++) { if (f->data[fline + cy * f->w + cx]) { gdImageSetPixel (im, px, py, color); } cx++; } cx = 0; cy++; } } BGD_DECLARE(void) gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color) { int cx, cy; int px, py; int fline; cx = 0; cy = 0; #ifdef CHARSET_EBCDIC c = ASC (c); #endif /*CHARSET_EBCDIC */ if ((c < f->offset) || (c >= (f->offset + f->nchars))) { return; } fline = (c - f->offset) * f->h * f->w; for (py = y; (py > (y - f->w)); py--) { for (px = x; (px < (x + f->h)); px++) { if (f->data[fline + cy * f->w + cx]) { gdImageSetPixel (im, px, py, color); } cy++; } cy = 0; cx++; } } BGD_DECLARE(void) gdImageString (gdImagePtr im, gdFontPtr f, int x, int y, unsigned char *s, int color) { int i; int l; l = strlen ((char *) s); for (i = 0; (i < l); i++) { gdImageChar (im, f, x, y, s[i], color); x += f->w; } } BGD_DECLARE(void) gdImageStringUp (gdImagePtr im, gdFontPtr f, int x, int y, unsigned char *s, int color) { int i; int l; l = strlen ((char *) s); for (i = 0; (i < l); i++) { gdImageCharUp (im, f, x, y, s[i], color); y -= f->w; } } static int strlen16 (unsigned short *s); BGD_DECLARE(void) gdImageString16 (gdImagePtr im, gdFontPtr f, int x, int y, unsigned short *s, int color) { int i; int l; l = strlen16 (s); for (i = 0; (i < l); i++) { gdImageChar (im, f, x, y, s[i], color); x += f->w; } } BGD_DECLARE(void) gdImageStringUp16 (gdImagePtr im, gdFontPtr f, int x, int y, unsigned short *s, int color) { int i; int l; l = strlen16 (s); for (i = 0; (i < l); i++) { gdImageCharUp (im, f, x, y, s[i], color); y -= f->w; } } static int strlen16 (unsigned short *s) { int len = 0; while (*s) { s++; len++; } return len; } #ifndef HAVE_LSQRT /* If you don't have a nice square root function for longs, you can use ** this hack */ long lsqrt (long n) { long result = (long) sqrt ((double) n); return result; } #endif /* s and e are integers modulo 360 (degrees), with 0 degrees 
being the rightmost extreme and degrees changing clockwise. cx and cy are the center in pixels; w and h are the horizontal and vertical diameter in pixels. Nice interface, but slow. See gd_arc_f_buggy.c for a better version that doesn't seem to be bug-free yet. */ BGD_DECLARE(void) gdImageArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e, int color) { gdImageFilledArc (im, cx, cy, w, h, s, e, color, gdNoFill); } BGD_DECLARE(void) gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e, int color, int style) { gdPoint pts[3]; int i; int lx = 0, ly = 0; int fx = 0, fy = 0; if ((s % 360) == (e % 360)) { s = 0; e = 360; } else { if (s > 360) { s = s % 360; } if (e > 360) { e = e % 360; } while (s < 0) { s += 360; } while (e < s) { e += 360; } if (s == e) { s = 0; e = 360; } } for (i = s; (i <= e); i++) { int x, y; x = ((long) gdCosT[i % 360] * (long) w / (2 * 1024)) + cx; y = ((long) gdSinT[i % 360] * (long) h / (2 * 1024)) + cy; if (i != s) { if (!(style & gdChord)) { if (style & gdNoFill) { gdImageLine (im, lx, ly, x, y, color); } else { /* This is expensive! */ pts[0].x = lx; pts[0].y = ly; pts[1].x = x; pts[1].y = y; pts[2].x = cx; pts[2].y = cy; gdImageFilledPolygon (im, pts, 3, color); } } } else { fx = x; fy = y; } lx = x; ly = y; } if (style & gdChord) { if (style & gdNoFill) { if (style & gdEdged) { gdImageLine (im, cx, cy, lx, ly, color); gdImageLine (im, cx, cy, fx, fy, color); } gdImageLine (im, fx, fy, lx, ly, color); } else { pts[0].x = fx; pts[0].y = fy; pts[1].x = lx; pts[1].y = ly; pts[2].x = cx; pts[2].y = cy; gdImageFilledPolygon (im, pts, 3, color); } } else { if (style & gdNoFill) { if (style & gdEdged) { gdImageLine (im, cx, cy, lx, ly, color); gdImageLine (im, cx, cy, fx, fy, color); } } } } BGD_DECLARE(void) gdImageEllipse(gdImagePtr im, int mx, int my, int w, int h, int c) { int x=0,mx1=0,mx2=0,my1=0,my2=0; long aq,bq,dx,dy,r,rx,ry,a,b; a=w>>1; b=h>>1; gdImageSetPixel(im,mx+a, my, c); gdImageSetPixel(im,mx-a, my, c); mx1 = mx-a; my1 = my; mx2 = mx+a; my2 = my; aq = a * a; bq = b * b; dx = aq << 1; dy = bq << 1; r = a * bq; rx = r << 1; ry = 0; x = a; while (x > 0) { if (r > 0) { my1++; my2--; ry +=dx; r -=ry; } if (r <= 0) { x--; mx1++; mx2--; rx -=dy; r +=rx; } gdImageSetPixel(im,mx1, my1, c); gdImageSetPixel(im,mx1, my2, c); gdImageSetPixel(im,mx2, my1, c); gdImageSetPixel(im,mx2, my2, c); } } BGD_DECLARE(void) gdImageFilledEllipse (gdImagePtr im, int mx, int my, int w, int h, int c) { int x=0,mx1=0,mx2=0,my1=0,my2=0; long aq,bq,dx,dy,r,rx,ry,a,b; int i; int old_y2; a=w>>1; b=h>>1; for (x = mx-a; x <= mx+a; x++) { gdImageSetPixel(im, x, my, c); } mx1 = mx-a; my1 = my; mx2 = mx+a; my2 = my; aq = a * a; bq = b * b; dx = aq << 1; dy = bq << 1; r = a * bq; rx = r << 1; ry = 0; x = a; old_y2=-2; while (x > 0) { if (r > 0) { my1++; my2--; ry +=dx; r -=ry; } if (r <= 0) { x--; mx1++; mx2--; rx -=dy; r +=rx; } if(old_y2!=my2) { for(i=mx1; i<=mx2; i++) { gdImageSetPixel(im,i,my1,c); } } if(old_y2!=my2) { for(i=mx1; i<=mx2; i++) { gdImageSetPixel(im,i,my2,c); } } old_y2 = my2; } } BGD_DECLARE(void) gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color) { int lastBorder; /* Seek left */ int leftLimit, rightLimit; int i; int restoreAlphaBleding; if (border < 0) { /* Refuse to fill to a non-solid border */ return; } leftLimit = (-1); restoreAlphaBleding = im->alphaBlendingFlag; im->alphaBlendingFlag = 0; if (x >= im->sx) { x = im->sx - 1; } else if (x < 0) { x = 0; } if (y >= im->sy) { y = im->sy - 1; } else if (y < 0) { y = 0; } 
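	/* Walk left from the (clamped) seed point, painting until we run into
	   the border colour */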
	for (i = x; (i >= 0); i--) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		leftLimit = i;
	}
	if (leftLimit == (-1)) {
		im->alphaBlendingFlag = restoreAlphaBleding;
		return;
	}
	/* Seek right */
	rightLimit = x;
	for (i = (x + 1); (i < im->sx); i++) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		rightLimit = i;
	}
	/* Look at lines above and below and start paints */
	/* Above */
	if (y > 0) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c;
			c = gdImageGetPixel (im, i, y - 1);
			if (lastBorder) {
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y - 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	/* Below */
	if (y < ((im->sy) - 1)) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c = gdImageGetPixel (im, i, y + 1);
			if (lastBorder) {
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y + 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	im->alphaBlendingFlag = restoreAlphaBleding;
}

/*
 * set the pixel at (x,y) and its 4-connected neighbors
 * with the same pixel value to the new pixel value nc (new color).
 * A 4-connected neighbor: pixel above, below, left, or right of a pixel.
 * ideas from comp.graphics discussions.
 * For tiled fill, the use of a flag buffer is mandatory, as the tile image
 * can contain the same color as the color to fill. To avoid bloating the
 * normal fill code, a second private function handles the tiled case.
 */
static int gdImageTileGet (gdImagePtr im, int x, int y)
{
	int srcx, srcy;
	int tileColor, p;
	if (!im->tile) {
		return -1;
	}
	srcx = x % gdImageSX(im->tile);
	srcy = y % gdImageSY(im->tile);
	p = gdImageGetPixel(im->tile, srcx, srcy);
	if (p == im->tile->transparent) {
		tileColor = im->transparent;
	} else if (im->trueColor) {
		if (im->tile->trueColor) {
			tileColor = p;
		} else {
			tileColor = gdTrueColorAlpha(gdImageRed(im->tile,p), gdImageGreen(im->tile,p), gdImageBlue(im->tile,p), gdImageAlpha(im->tile,p));
		}
	} else {
		if (im->tile->trueColor) {
			tileColor = gdImageColorResolveAlpha(im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p));
		} else {
			tileColor = gdImageColorResolveAlpha(im, gdImageRed (im->tile,p), gdImageGreen (im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
		}
	}
	return tileColor;
}

/* horizontal segment of scan line y */
struct seg {
	int y, xl, xr, dy;
};

/* max depth of stack */
#define FILL_MAX ((int)(im->sy*im->sx)/4)
#define FILL_PUSH(Y, XL, XR, DY) \
	if (sp<stack+FILL_MAX && Y+(DY)>=0 && Y+(DY)<wy2) \
	{sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;}

#define FILL_POP(Y, XL, XR, DY) \
	{sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;}

static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc);

BGD_DECLARE(void) gdImageFill(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2, wy2;
	int alphablending_bak;

	/* stack of filled segments */
	/* struct seg stack[FILL_MAX],*sp = stack; */
	struct seg *stack;
	struct seg *sp;

	if (!im->trueColor && nc > (im->colorsTotal - 1)) {
		return;
	}

	alphablending_bak = im->alphaBlendingFlag;
	im->alphaBlendingFlag = 0;

	if (nc==gdTiled) {
		_gdImageFillTiled(im,x,y,nc);
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}

	wx2=im->sx;
	wy2=im->sy;
	oc = gdImageGetPixel(im, x, y);
	if (oc==nc || x<0 || x>wx2 || y<0 || y>wy2) {
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}

	/* Do not use the 4 neighbors implementation with
	 * small images */
	if (im->sx < 4) {
		int ix = x, iy = y, c;
		do {
			do {
				c = gdImageGetPixel(im, ix, iy);
				if (c != oc) {
					goto done;
				}
				gdImageSetPixel(im, ix, iy, nc);
			} while(ix++ < (im->sx -1));
			ix = x;
		} while(iy++ < (im->sy -1));
		goto done;
	}

	if(overflow2(im->sy, im->sx)) {
		/* restore the blending mode we disabled above before bailing out */
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}

	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}

	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}
	sp = stack;

	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);

		for (x=x1; x>=0 && gdImageGetPixel(im,x, y)==oc; x--) {
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;

		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			for (; x<=wx2 && gdImageGetPixel(im,x, y)==oc; x++) {
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (gdImageGetPixel(im, x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}
	gdFree(stack);

done:
	im->alphaBlendingFlag = alphablending_bak;
}

static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2, wy2;

	/* stack of filled segments */
	struct seg *stack;
	struct seg *sp;
	char *pts;

	if (!im->tile) {
		return;
	}

	wx2=im->sx;
	wy2=im->sy;

	if(overflow2(im->sy, im->sx)) {
		return;
	}

	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		return;
	}

	pts = (char *) gdCalloc(im->sy * im->sx, sizeof(char));
	if (!pts) {
		return;
	}

	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		gdFree(pts);
		return;
	}
	sp = stack;

	oc = gdImageGetPixel(im, x, y);

	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);
		for (x=x1; x>=0 && (!pts[y + x*wy2] && gdImageGetPixel(im,x,y)==oc); x--) {
			nc = gdImageTileGet(im,x,y);
			pts[y + x*wy2]=1;
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;

		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			for (; x<wx2 && (!pts[y + x*wy2] && gdImageGetPixel(im,x, y)==oc) ; x++) {
				if (pts[y + x*wy2]) {
					/* we should never be here */
					break;
				}
				nc = gdImageTileGet(im,x,y);
				pts[y + x*wy2]=1;
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (pts[y + x*wy2] || gdImageGetPixel(im,x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}

	gdFree(pts);
	gdFree(stack);
}

BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int thick = im->thick;

	if (x1 == x2 && y1 == y2 && thick == 1) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}

	if (y2 < y1) {
		int t;
		t = y1;
		y1 = y2;
		y2 = t;
		t = x1;
		x1 = x2;
		x2 = t;
	}

	if (thick > 1) {
		int cx, cy, x1ul, y1ul, x2lr, y2lr;
		int half = thick >> 1;

		x1ul = x1 - half;
		y1ul = y1 - half;

		x2lr = x2 + half;
		y2lr = y2 + half;

		cy = y1ul + thick;
		while (cy-- > y1ul) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		cy = y2lr - thick;
		while (cy++ < y2lr) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x1ul - 1;
			while (cx++ < x1ul + thick) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x2lr - thick - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		return;
	} else {
		gdImageLine(im, x1, y1, x2, y1, color);
		gdImageLine(im, x1, y2, x2, y2, color);
		gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color);
		gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color);
	}
}

BGD_DECLARE(void) gdImageFilledRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int x, y;

	if (x1 == x2 && y1 == y2) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}

	if (x1 > x2) {
		x = x1;
		x1 = x2;
		x2 = x;
	}

	if (y1 > y2) {
		y = y1;
		y1 = y2;
		y2 = y;
	}

	if (x1 < 0) {
		x1 = 0;
	}

	if (x2 >= gdImageSX(im)) {
		x2 = gdImageSX(im) - 1;
	}

	if (y1 < 0) {
		y1 = 0;
	}

	if (y2 >= gdImageSY(im)) {
		y2 = gdImageSY(im) - 1;
	}

	for (y = y1; (y <= y2); y++) {
		for (x = x1; (x <= x2); x++) {
			gdImageSetPixel (im, x, y, color);
		}
	}
}

BGD_DECLARE(gdImagePtr) gdImageClone (gdImagePtr src)
{
	gdImagePtr dst;
	register int i, x;

	if (src->trueColor) {
		dst = gdImageCreateTrueColor(src->sx , src->sy);
	} else {
		dst = gdImageCreate(src->sx , src->sy);
	}

	if (dst == NULL) {
		return NULL;
	}

	if (src->trueColor == 0) {
		dst->colorsTotal = src->colorsTotal;
		for (i = 0; i < gdMaxColors; i++) {
			dst->red[i]   = src->red[i];
			dst->green[i] = src->green[i];
			dst->blue[i]  = src->blue[i];
			dst->alpha[i] = src->alpha[i];
			dst->open[i]  = src->open[i];
		}
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->pixels[i][x] = src->pixels[i][x];
			}
		}
	} else {
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->tpixels[i][x] = src->tpixels[i][x];
			}
		}
	}

	/* The fresh image has no style buffer of its own, so the style is
	   cloned via gdImageSetStyle() below rather than by writing through
	   the null dst->style pointer here. */

	dst->interlace = src->interlace;

	dst->alphaBlendingFlag = src->alphaBlendingFlag;
	dst->saveAlphaFlag     = src->saveAlphaFlag;
	dst->AA                = src->AA;
	dst->AA_color          = src->AA_color;
	dst->AA_dont_blend     = src->AA_dont_blend;

	dst->cx1 = src->cx1;
	dst->cy1 = src->cy1;
	dst->cx2 = src->cx2;
	dst->cy2 = src->cy2;

	dst->res_x = src->res_x;
	dst->res_y = src->res_y;

	dst->paletteQuantizationMethod     = src->paletteQuantizationMethod;
	dst->paletteQuantizationSpeed      = src->paletteQuantizationSpeed;
	dst->paletteQuantizationMinQuality = src->paletteQuantizationMinQuality;

	dst->interpolation_id = src->interpolation_id;
	dst->interpolation    = src->interpolation;

	if (src->brush) {
		dst->brush = gdImageClone(src->brush);
	}

	if (src->tile) {
		dst->tile = gdImageClone(src->tile);
	}

	if (src->style) {
		gdImageSetStyle(dst, src->style, src->styleLength);
		dst->stylePos = src->stylePos;
	}

	for (i = 0; i < gdMaxColors; i++) {
		dst->brushColorMap[i] = src->brushColorMap[i];
		dst->tileColorMap[i] = src->tileColorMap[i];
	}

	if (src->polyAllocated > 0) {
		/* the fresh image has no polyInts buffer either; allocate one
		   before copying the scanline workspace */
		dst->polyInts = (int *) gdMalloc (sizeof (int) * src->polyAllocated);
		if (dst->polyInts) {
			dst->polyAllocated = src->polyAllocated;
			for (i = 0; i < src->polyAllocated; i++) {
				dst->polyInts[i] = src->polyInts[i];
			}
		}
	}

	return dst;
}

BGD_DECLARE(void) gdImageCopy (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h)
{
	int c;
	int x, y;
	int tox, toy;
	int i;
	int colorMap[gdMaxColors];

	if (dst->trueColor) {
		/* 2.0: much easier when the destination is truecolor. */
		/* 2.0.10: needs a transparent-index check that is still valid if
		 *         the source is not truecolor. Thanks to Frank Warmerdam. */

		if (src->trueColor) {
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetTrueColorPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						gdImageSetPixel (dst, dstX + x, dstY + y, c);
					}
				}
			}
		} else {
			/* source is palette based */
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						gdImageSetPixel(dst, dstX + x, dstY + y, gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]));
					}
				}
			}
		}
		return;
	}

	for (i = 0; (i < gdMaxColors); i++) {
		colorMap[i] = (-1);
	}
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			int mapTo;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/* Have we established a mapping for this color? */
			if (src->trueColor) {
				/* 2.05: remap to the palette available in
				   the destination image. This is slow and
				   works badly, but it beats crashing! Thanks
				   to Padhrig McCarthy. */
				mapTo = gdImageColorResolveAlpha (dst, gdTrueColorGetRed (c), gdTrueColorGetGreen (c), gdTrueColorGetBlue (c), gdTrueColorGetAlpha (c));
			} else if (colorMap[c] == (-1)) {
				/* If it's the same image, mapping is trivial */
				if (dst == src) {
					nc = c;
				} else {
					/* Get best match possible. This
					   function never returns error. */
					nc = gdImageColorResolveAlpha (dst, src->red[c], src->green[c], src->blue[c], src->alpha[c]);
				}
				colorMap[c] = nc;
				mapTo = colorMap[c];
			} else {
				mapTo = colorMap[c];
			}
			gdImageSetPixel (dst, tox, toy, mapTo);
			tox++;
		}
		toy++;
	}
}

/* This function is a substitute for real alpha channel operations,
   so it doesn't pay attention to the alpha channel.
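   Each channel is mixed linearly: out = src * pct / 100 + dst * (100 - pct) / 100,
   so pct = 100 copies the source region outright and pct = 0 leaves the
   destination untouched. E.g. (a hypothetical call) to lay src over dst at
   50% visibility:

   > gdImageCopyMerge(dst, src, 0, 0, 0, 0, gdImageSX(src), gdImageSY(src), 50);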
*/ BGD_DECLARE(void) gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct) { int c, dc; int x, y; int tox, toy; int ncR, ncG, ncB; toy = dstY; for (y = srcY; (y < (srcY + h)); y++) { tox = dstX; for (x = srcX; (x < (srcX + w)); x++) { int nc; c = gdImageGetPixel (src, x, y); /* Added 7/24/95: support transparent copies */ if (gdImageGetTransparent (src) == c) { tox++; continue; } /* If it's the same image, mapping is trivial */ if (dst == src) { nc = c; } else { dc = gdImageGetPixel (dst, tox, toy); ncR = gdImageRed (src, c) * (pct / 100.0) + gdImageRed (dst, dc) * ((100 - pct) / 100.0); ncG = gdImageGreen (src, c) * (pct / 100.0) + gdImageGreen (dst, dc) * ((100 - pct) / 100.0); ncB = gdImageBlue (src, c) * (pct / 100.0) + gdImageBlue (dst, dc) * ((100 - pct) / 100.0); /* Find a reasonable color */ nc = gdImageColorResolve (dst, ncR, ncG, ncB); } gdImageSetPixel (dst, tox, toy, nc); tox++; } toy++; } } /* This function is a substitute for real alpha channel operations, so it doesn't pay attention to the alpha channel. */ BGD_DECLARE(void) gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct) { int c, dc; int x, y; int tox, toy; int ncR, ncG, ncB; float g; toy = dstY; for (y = srcY; (y < (srcY + h)); y++) { tox = dstX; for (x = srcX; (x < (srcX + w)); x++) { int nc; c = gdImageGetPixel (src, x, y); /* Added 7/24/95: support transparent copies */ if (gdImageGetTransparent (src) == c) { tox++; continue; } /* * If it's the same image, mapping is NOT trivial since we * merge with greyscale target, but if pct is 100, the grey * value is not used, so it becomes trivial. pjw 2.0.12. */ if (dst == src && pct == 100) { nc = c; } else { dc = gdImageGetPixel (dst, tox, toy); g = 0.29900 * gdImageRed(dst, dc) + 0.58700 * gdImageGreen(dst, dc) + 0.11400 * gdImageBlue(dst, dc); ncR = gdImageRed (src, c) * (pct / 100.0) + g * ((100 - pct) / 100.0); ncG = gdImageGreen (src, c) * (pct / 100.0) + g * ((100 - pct) / 100.0); ncB = gdImageBlue (src, c) * (pct / 100.0) + g * ((100 - pct) / 100.0); /* First look for an exact match */ nc = gdImageColorExact (dst, ncR, ncG, ncB); if (nc == (-1)) { /* No, so try to allocate it */ nc = gdImageColorAllocate (dst, ncR, ncG, ncB); /* If we're out of colors, go for the closest color */ if (nc == (-1)) { nc = gdImageColorClosest (dst, ncR, ncG, ncB); } } } gdImageSetPixel (dst, tox, toy, nc); tox++; } toy++; } } BGD_DECLARE(void) gdImageCopyResized (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int dstW, int dstH, int srcW, int srcH) { int c; int x, y; int tox, toy; int ydest; int i; int colorMap[gdMaxColors]; /* Stretch vectors */ int *stx; int *sty; /* We only need to use floating point to determine the correct stretch vector for one line's worth. 
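	   Each entry of stx/sty records how many destination pixels the
	   corresponding source pixel covers; e.g. stretching srcW = 3 up to
	   dstW = 5 gives stx = {1, 2, 2}, and the entries always sum to dstW.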
*/ if (overflow2(sizeof (int), srcW)) { return; } if (overflow2(sizeof (int), srcH)) { return; } stx = (int *) gdMalloc (sizeof (int) * srcW); if (!stx) { return; } sty = (int *) gdMalloc (sizeof (int) * srcH); if (!sty) { gdFree(stx); return; } /* Fixed by Mao Morimoto 2.0.16 */ for (i = 0; (i < srcW); i++) { stx[i] = dstW * (i + 1) / srcW - dstW * i / srcW; } for (i = 0; (i < srcH); i++) { sty[i] = dstH * (i + 1) / srcH - dstH * i / srcH; } for (i = 0; (i < gdMaxColors); i++) { colorMap[i] = (-1); } toy = dstY; for (y = srcY; (y < (srcY + srcH)); y++) { for (ydest = 0; (ydest < sty[y - srcY]); ydest++) { tox = dstX; for (x = srcX; (x < (srcX + srcW)); x++) { int nc = 0; int mapTo; if (!stx[x - srcX]) { continue; } if (dst->trueColor) { /* 2.0.9: Thorben Kundinger: Maybe the source image is not a truecolor image */ if (!src->trueColor) { int tmp = gdImageGetPixel (src, x, y); mapTo = gdImageGetTrueColorPixel (src, x, y); if (gdImageGetTransparent (src) == tmp) { /* 2.0.21, TK: not tox++ */ tox += stx[x - srcX]; continue; } } else { /* TK: old code follows */ mapTo = gdImageGetTrueColorPixel (src, x, y); /* Added 7/24/95: support transparent copies */ if (gdImageGetTransparent (src) == mapTo) { /* 2.0.21, TK: not tox++ */ tox += stx[x - srcX]; continue; } } } else { c = gdImageGetPixel (src, x, y); /* Added 7/24/95: support transparent copies */ if (gdImageGetTransparent (src) == c) { tox += stx[x - srcX]; continue; } if (src->trueColor) { /* Remap to the palette available in the destination image. This is slow and works badly. */ mapTo = gdImageColorResolveAlpha (dst, gdTrueColorGetRed (c), gdTrueColorGetGreen (c), gdTrueColorGetBlue (c), gdTrueColorGetAlpha (c)); } else { /* Have we established a mapping for this color? */ if (colorMap[c] == (-1)) { /* If it's the same image, mapping is trivial */ if (dst == src) { nc = c; } else { /* Find or create the best match */ /* 2.0.5: can't use gdTrueColorGetRed, etc with palette */ nc = gdImageColorResolveAlpha (dst, gdImageRed (src, c), gdImageGreen (src, c), gdImageBlue (src, c), gdImageAlpha (src, c)); } colorMap[c] = nc; } mapTo = colorMap[c]; } } for (i = 0; (i < stx[x - srcX]); i++) { gdImageSetPixel (dst, tox, toy, mapTo); tox++; } } toy++; } } gdFree (stx); gdFree (sty); } /* gd 2.0.8: gdImageCopyRotated is added. Source is a rectangle, with its upper left corner at srcX and srcY. Destination is the *center* of the rotated copy. Angle is in degrees, same as gdImageArc. Floating point destination center coordinates allow accurate rotation of objects of odd-numbered width or height. */ BGD_DECLARE(void) gdImageCopyRotated (gdImagePtr dst, gdImagePtr src, double dstX, double dstY, int srcX, int srcY, int srcWidth, int srcHeight, int angle) { double dx, dy; double radius = sqrt (srcWidth * srcWidth + srcHeight * srcHeight); double aCos = cos (angle * .0174532925); double aSin = sin (angle * .0174532925); double scX = srcX + ((double) srcWidth) / 2; double scY = srcY + ((double) srcHeight) / 2; int cmap[gdMaxColors]; int i; /* 2.0.34: transparency preservation. The transparentness of the transparent color is more important than its hue. 
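	   Concretely: if the destination has no transparent index of its own it
	   inherits the source's, and transparent source pixels are copied as the
	   destination's transparent colour rather than being colour-matched.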
*/ if (src->transparent != -1) { if (dst->transparent == -1) { dst->transparent = src->transparent; } } for (i = 0; (i < gdMaxColors); i++) { cmap[i] = (-1); } for (dy = dstY - radius; (dy <= dstY + radius); dy++) { for (dx = dstX - radius; (dx <= dstX + radius); dx++) { double sxd = (dx - dstX) * aCos - (dy - dstY) * aSin; double syd = (dy - dstY) * aCos + (dx - dstX) * aSin; int sx = sxd + scX; int sy = syd + scY; if ((sx >= srcX) && (sx < srcX + srcWidth) && (sy >= srcY) && (sy < srcY + srcHeight)) { int c = gdImageGetPixel (src, sx, sy); /* 2.0.34: transparency wins */ if (c == src->transparent) { gdImageSetPixel (dst, dx, dy, dst->transparent); } else if (!src->trueColor) { /* Use a table to avoid an expensive lookup on every single pixel */ if (cmap[c] == -1) { cmap[c] = gdImageColorResolveAlpha (dst, gdImageRed (src, c), gdImageGreen (src, c), gdImageBlue (src, c), gdImageAlpha (src, c)); } gdImageSetPixel (dst, dx, dy, cmap[c]); } else { gdImageSetPixel (dst, dx, dy, gdImageColorResolveAlpha (dst, gdImageRed (src, c), gdImageGreen (src, c), gdImageBlue (src, c), gdImageAlpha (src, c))); } } } } } /* When gd 1.x was first created, floating point was to be avoided. These days it is often faster than table lookups or integer arithmetic. The routine below is shamelessly, gloriously floating point. TBB */ /* 2.0.10: cast instead of floor() yields 35% performance improvement. Thanks to John Buckman. */ #define floor2(exp) ((long) exp) /*#define floor2(exp) floor(exp)*/ BGD_DECLARE(void) gdImageCopyResampled (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int dstW, int dstH, int srcW, int srcH) { int x, y; double sy1, sy2, sx1, sx2; if (!dst->trueColor) { gdImageCopyResized (dst, src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH); return; } for (y = dstY; (y < dstY + dstH); y++) { sy1 = ((double) y - (double) dstY) * (double) srcH / (double) dstH; sy2 = ((double) (y + 1) - (double) dstY) * (double) srcH / (double) dstH; for (x = dstX; (x < dstX + dstW); x++) { double sx, sy; double spixels = 0; double red = 0.0, green = 0.0, blue = 0.0, alpha = 0.0; double alpha_sum = 0.0, contrib_sum = 0.0; sx1 = ((double) x - (double) dstX) * (double) srcW / dstW; sx2 = ((double) (x + 1) - (double) dstX) * (double) srcW / dstW; sy = sy1; do { double yportion; if (floor2 (sy) == floor2 (sy1)) { yportion = 1.0 - (sy - floor2 (sy)); if (yportion > sy2 - sy1) { yportion = sy2 - sy1; } sy = floor2 (sy); } else if (sy == floor2 (sy2)) { yportion = sy2 - floor2 (sy2); } else { yportion = 1.0; } sx = sx1; do { double xportion; double pcontribution; int p; if (floor2 (sx) == floor2 (sx1)) { xportion = 1.0 - (sx - floor2 (sx)); if (xportion > sx2 - sx1) { xportion = sx2 - sx1; } sx = floor2 (sx); } else if (sx == floor2 (sx2)) { xportion = sx2 - floor2 (sx2); } else { xportion = 1.0; } pcontribution = xportion * yportion; /* 2.08: previously srcX and srcY were ignored. 
					   Andrew Pattison */
					p = gdImageGetTrueColorPixel (src, (int) sx + srcX, (int) sy + srcY);
					red += gdTrueColorGetRed (p) * pcontribution;
					green += gdTrueColorGetGreen (p) * pcontribution;
					blue += gdTrueColorGetBlue (p) * pcontribution;
					alpha += gdTrueColorGetAlpha (p) * pcontribution;
					spixels += xportion * yportion;
					sx += 1.0;
				} while (sx < sx2);
				sy += 1.0;
			} while (sy < sy2);

			if (spixels != 0.0) {
				red /= spixels;
				green /= spixels;
				blue /= spixels;
				alpha /= spixels;
				alpha += 0.5;
			}
			if (alpha_sum != 0.0f) {
				if (contrib_sum != 0.0f) {
					alpha_sum /= contrib_sum;
				}
				red /= alpha_sum;
				green /= alpha_sum;
				blue /= alpha_sum;
			}
			/* Clamping to allow for rounding errors above */
			if (red > 255.0) {
				red = 255.0;
			}
			if (green > 255.0) {
				green = 255.0;
			}
			if (blue > 255.0) {
				blue = 255.0;
			}
			if (alpha > gdAlphaMax) {
				alpha = gdAlphaMax;
			}
			gdImageSetPixel (dst, x, y, gdTrueColorAlpha ((int) red, (int) green, (int) blue, (int) alpha));
		}
	}
}

BGD_DECLARE(void) gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	if (n <= 0) {
		return;
	}

	gdImageLine (im, p->x, p->y, p[n - 1].x, p[n - 1].y, c);
	gdImageOpenPolygon (im, p, n, c);
}

BGD_DECLARE(void) gdImageOpenPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	int i;
	int lx, ly;

	if (n <= 0) {
		return;
	}

	lx = p->x;
	ly = p->y;
	for (i = 1; (i < n); i++) {
		p++;
		gdImageLine (im, lx, ly, p->x, p->y, c);
		lx = p->x;
		ly = p->y;
	}
}

/* THANKS to Kirsten Schulz for the polygon fixes! */

/* The intersection finding technique of this code could be improved */
/* by remembering the previous intersection, and by using the slope. */
/* That could help to adjust intersections to produce a nice */
/* interior_extrema. */

BGD_DECLARE(void) gdImageFilledPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	int i;
	int j;
	int index;
	int y;
	int miny, maxy, pmaxy;
	int x1, y1;
	int x2, y2;
	int ind1, ind2;
	int ints;
	int fill_color;

	if (n <= 0) {
		return;
	}

	if (c == gdAntiAliased) {
		fill_color = im->AA_color;
	} else {
		fill_color = c;
	}

	if (!im->polyAllocated) {
		if (overflow2(sizeof (int), n)) {
			return;
		}
		im->polyInts = (int *) gdMalloc (sizeof (int) * n);
		if (!im->polyInts) {
			return;
		}
		im->polyAllocated = n;
	}
	if (im->polyAllocated < n) {
		int nallocated = im->polyAllocated;
		while (nallocated < n) {
			nallocated *= 2;
		}
		if (overflow2(sizeof (int), nallocated)) {
			return;
		}
		im->polyInts = (int *) gdReallocEx (im->polyInts, sizeof (int) * nallocated);
		if (!im->polyInts) {
			/* the old buffer is gone; forget its size too, so the next
			   call starts from scratch instead of writing through a
			   null pointer */
			im->polyAllocated = 0;
			return;
		}
		im->polyAllocated = nallocated;
	}
	miny = p[0].y;
	maxy = p[0].y;
	for (i = 1; (i < n); i++) {
		if (p[i].y < miny) {
			miny = p[i].y;
		}
		if (p[i].y > maxy) {
			maxy = p[i].y;
		}
	}
	pmaxy = maxy;
	/* 2.0.16: Optimization by Ilia Chipitsine -- don't waste time offscreen */
	/* 2.0.26: clipping rectangle is even better */
	if (miny < im->cy1) {
		miny = im->cy1;
	}
	if (maxy > im->cy2) {
		maxy = im->cy2;
	}
	/* Fix in 1.3: count a vertex only once */
	for (y = miny; (y <= maxy); y++) {
		ints = 0;
		for (i = 0; (i < n); i++) {
			if (!i) {
				ind1 = n - 1;
				ind2 = 0;
			} else {
				ind1 = i - 1;
				ind2 = i;
			}
			y1 = p[ind1].y;
			y2 = p[ind2].y;
			if (y1 < y2) {
				x1 = p[ind1].x;
				x2 = p[ind2].x;
			} else if (y1 > y2) {
				y2 = p[ind1].y;
				y1 = p[ind2].y;
				x2 = p[ind1].x;
				x1 = p[ind2].x;
			} else {
				continue;
			}
			/* Do the following math as float intermediately, and round to ensure
			 * that Polygon and FilledPolygon for the same set of points have the
			 * same footprint.
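			 *
			 * The crossing of this edge with scanline y is
			 * x1 + (y - y1) * (x2 - x1) / (y2 - y1), computed in float and
			 * rounded to the nearest pixel by adding 0.5 before truncating.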
*/ if ((y >= y1) && (y < y2)) { im->polyInts[ints++] = (int) ((float) ((y - y1) * (x2 - x1)) / (float) (y2 - y1) + 0.5 + x1); } else if ((y == pmaxy) && (y == y2)) { im->polyInts[ints++] = x2; } } /* 2.0.26: polygons pretty much always have less than 100 points, and most of the time they have considerably less. For such trivial cases, insertion sort is a good choice. Also a good choice for future implementations that may wish to indirect through a table. */ for (i = 1; (i < ints); i++) { index = im->polyInts[i]; j = i; while ((j > 0) && (im->polyInts[j - 1] > index)) { im->polyInts[j] = im->polyInts[j - 1]; j--; } im->polyInts[j] = index; } for (i = 0; (i < (ints-1)); i += 2) { /* 2.0.29: back to gdImageLine to prevent segfaults when performing a pattern fill */ gdImageLine (im, im->polyInts[i], y, im->polyInts[i + 1], y, fill_color); } } /* If we are drawing this AA, then redraw the border with AA lines. */ /* This doesn't work as well as I'd like, but it doesn't clash either. */ if (c == gdAntiAliased) { gdImagePolygon (im, p, n, c); } } static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t); BGD_DECLARE(void) gdImageSetStyle (gdImagePtr im, int *style, int noOfPixels) { if (im->style) { gdFree (im->style); } if (overflow2(sizeof (int), noOfPixels)) { return; } im->style = (int *) gdMalloc (sizeof (int) * noOfPixels); if (!im->style) { return; } memcpy (im->style, style, sizeof (int) * noOfPixels); im->styleLength = noOfPixels; im->stylePos = 0; } BGD_DECLARE(void) gdImageSetThickness (gdImagePtr im, int thickness) { im->thick = thickness; } BGD_DECLARE(void) gdImageSetBrush (gdImagePtr im, gdImagePtr brush) { int i; im->brush = brush; if ((!im->trueColor) && (!im->brush->trueColor)) { for (i = 0; (i < gdImageColorsTotal (brush)); i++) { int index; index = gdImageColorResolveAlpha (im, gdImageRed (brush, i), gdImageGreen (brush, i), gdImageBlue (brush, i), gdImageAlpha (brush, i)); im->brushColorMap[i] = index; } } } BGD_DECLARE(void) gdImageSetTile (gdImagePtr im, gdImagePtr tile) { int i; im->tile = tile; if ((!im->trueColor) && (!im->tile->trueColor)) { for (i = 0; (i < gdImageColorsTotal (tile)); i++) { int index; index = gdImageColorResolveAlpha (im, gdImageRed (tile, i), gdImageGreen (tile, i), gdImageBlue (tile, i), gdImageAlpha (tile, i)); im->tileColorMap[i] = index; } } } BGD_DECLARE(void) gdImageSetAntiAliased (gdImagePtr im, int c) { im->AA = 1; im->AA_color = c; im->AA_dont_blend = -1; } BGD_DECLARE(void) gdImageSetAntiAliasedDontBlend (gdImagePtr im, int c, int dont_blend) { im->AA = 1; im->AA_color = c; im->AA_dont_blend = dont_blend; } BGD_DECLARE(void) gdImageInterlace (gdImagePtr im, int interlaceArg) { im->interlace = interlaceArg; } BGD_DECLARE(int) gdImageCompare (gdImagePtr im1, gdImagePtr im2) { int x, y; int p1, p2; int cmpStatus = 0; int sx, sy; if (im1->interlace != im2->interlace) { cmpStatus |= GD_CMP_INTERLACE; } if (im1->transparent != im2->transparent) { cmpStatus |= GD_CMP_TRANSPARENT; } if (im1->trueColor != im2->trueColor) { cmpStatus |= GD_CMP_TRUECOLOR; } sx = im1->sx; if (im1->sx != im2->sx) { cmpStatus |= GD_CMP_SIZE_X + GD_CMP_IMAGE; if (im2->sx < im1->sx) { sx = im2->sx; } } sy = im1->sy; if (im1->sy != im2->sy) { cmpStatus |= GD_CMP_SIZE_Y + GD_CMP_IMAGE; if (im2->sy < im1->sy) { sy = im2->sy; } } if (im1->colorsTotal != im2->colorsTotal) { cmpStatus |= GD_CMP_NUM_COLORS; } for (y = 0; (y < sy); y++) { for (x = 0; (x < sx); x++) { p1 = im1->trueColor ? 
gdImageTrueColorPixel (im1, x, y) : gdImagePalettePixel (im1, x, y); p2 = im2->trueColor ? gdImageTrueColorPixel (im2, x, y) : gdImagePalettePixel (im2, x, y); if (gdImageRed (im1, p1) != gdImageRed (im2, p2)) { cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE; break; } if (gdImageGreen (im1, p1) != gdImageGreen (im2, p2)) { cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE; break; } if (gdImageBlue (im1, p1) != gdImageBlue (im2, p2)) { cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE; break; } #if 0 /* Soon we'll add alpha channel to palettes */ if (gdImageAlpha (im1, p1) != gdImageAlpha (im2, p2)) { cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE; break; } #endif } if (cmpStatus & GD_CMP_COLOR) { break; }; } return cmpStatus; } /* Thanks to Frank Warmerdam for this superior implementation of gdAlphaBlend(), which merges alpha in the destination color much better. */ BGD_DECLARE(int) gdAlphaBlend (int dst, int src) { int src_alpha = gdTrueColorGetAlpha(src); int dst_alpha, alpha, red, green, blue; int src_weight, dst_weight, tot_weight; /* -------------------------------------------------------------------- */ /* Simple cases we want to handle fast. */ /* -------------------------------------------------------------------- */ if( src_alpha == gdAlphaOpaque ) return src; dst_alpha = gdTrueColorGetAlpha(dst); if( src_alpha == gdAlphaTransparent ) return dst; if( dst_alpha == gdAlphaTransparent ) return src; /* -------------------------------------------------------------------- */ /* What will the source and destination alphas be? Note that */ /* the destination weighting is substantially reduced as the */ /* overlay becomes quite opaque. */ /* -------------------------------------------------------------------- */ src_weight = gdAlphaTransparent - src_alpha; dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax; tot_weight = src_weight + dst_weight; /* -------------------------------------------------------------------- */ /* What red, green and blue result values will we use? */ /* -------------------------------------------------------------------- */ alpha = src_alpha * dst_alpha / gdAlphaMax; red = (gdTrueColorGetRed(src) * src_weight + gdTrueColorGetRed(dst) * dst_weight) / tot_weight; green = (gdTrueColorGetGreen(src) * src_weight + gdTrueColorGetGreen(dst) * dst_weight) / tot_weight; blue = (gdTrueColorGetBlue(src) * src_weight + gdTrueColorGetBlue(dst) * dst_weight) / tot_weight; /* -------------------------------------------------------------------- */ /* Return merged result. 
*/ /* -------------------------------------------------------------------- */ return ((alpha << 24) + (red << 16) + (green << 8) + blue); } static int gdAlphaOverlayColor (int src, int dst, int max ); BGD_DECLARE(int) gdLayerOverlay (int dst, int src) { int a1, a2; a1 = gdAlphaMax - gdTrueColorGetAlpha(dst); a2 = gdAlphaMax - gdTrueColorGetAlpha(src); return ( ((gdAlphaMax - a1*a2/gdAlphaMax) << 24) + (gdAlphaOverlayColor( gdTrueColorGetRed(src), gdTrueColorGetRed(dst), gdRedMax ) << 16) + (gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax ) << 8) + (gdAlphaOverlayColor( gdTrueColorGetBlue(src), gdTrueColorGetBlue(dst), gdBlueMax )) ); } /* Apply 'overlay' effect - background pixels are colourised by the foreground colour */ static int gdAlphaOverlayColor (int src, int dst, int max ) { dst = dst << 1; if( dst > max ) { /* in the "light" zone */ return dst + (src << 1) - (dst * src / max) - max; } else { /* in the "dark" zone */ return dst * src / max; } } /* Apply 'multiply' effect */ BGD_DECLARE(int) gdLayerMultiply (int dst, int src) { int a1, a2, r1, r2, g1, g2, b1, b2; a1 = gdAlphaMax - gdTrueColorGetAlpha(src); a2 = gdAlphaMax - gdTrueColorGetAlpha(dst); r1 = gdRedMax - (a1 * (gdRedMax - gdTrueColorGetRed(src))) / gdAlphaMax; r2 = gdRedMax - (a2 * (gdRedMax - gdTrueColorGetRed(dst))) / gdAlphaMax; g1 = gdGreenMax - (a1 * (gdGreenMax - gdTrueColorGetGreen(src))) / gdAlphaMax; g2 = gdGreenMax - (a2 * (gdGreenMax - gdTrueColorGetGreen(dst))) / gdAlphaMax; b1 = gdBlueMax - (a1 * (gdBlueMax - gdTrueColorGetBlue(src))) / gdAlphaMax; b2 = gdBlueMax - (a2 * (gdBlueMax - gdTrueColorGetBlue(dst))) / gdAlphaMax ; a1 = gdAlphaMax - a1; a2 = gdAlphaMax - a2; return ( ((a1*a2/gdAlphaMax) << 24) + ((r1*r2/gdRedMax) << 16) + ((g1*g2/gdGreenMax) << 8) + ((b1*b2/gdBlueMax)) ); } BGD_DECLARE(void) gdImageAlphaBlending (gdImagePtr im, int alphaBlendingArg) { im->alphaBlendingFlag = alphaBlendingArg; } BGD_DECLARE(void) gdImageSaveAlpha (gdImagePtr im, int saveAlphaArg) { im->saveAlphaFlag = saveAlphaArg; } BGD_DECLARE(void) gdImageSetClip (gdImagePtr im, int x1, int y1, int x2, int y2) { if (x1 < 0) { x1 = 0; } if (x1 >= im->sx) { x1 = im->sx - 1; } if (x2 < 0) { x2 = 0; } if (x2 >= im->sx) { x2 = im->sx - 1; } if (y1 < 0) { y1 = 0; } if (y1 >= im->sy) { y1 = im->sy - 1; } if (y2 < 0) { y2 = 0; } if (y2 >= im->sy) { y2 = im->sy - 1; } im->cx1 = x1; im->cy1 = y1; im->cx2 = x2; im->cy2 = y2; } BGD_DECLARE(void) gdImageGetClip (gdImagePtr im, int *x1P, int *y1P, int *x2P, int *y2P) { *x1P = im->cx1; *y1P = im->cy1; *x2P = im->cx2; *y2P = im->cy2; } BGD_DECLARE(void) gdImageSetResolution(gdImagePtr im, const unsigned int res_x, const unsigned int res_y) { if (res_x > 0) im->res_x = res_x; if (res_y > 0) im->res_y = res_y; } /* * Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org) * */ #define BLEND_COLOR(a, nc, c, cc) \ nc = (cc) + (((((c) - (cc)) * (a)) + ((((c) - (cc)) * (a)) >> 8) + 0x80) >> 8); static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t) { int dr,dg,db,p,r,g,b; /* 2.0.34: watch out for out of range calls */ if (!gdImageBoundsSafeMacro(im, x, y)) { return; } p = gdImageGetPixel(im,x,y); /* TBB: we have to implement the dont_blend stuff to provide the full feature set of the old implementation */ if ((p == color) || ((p == im->AA_dont_blend) && (t != 0x00))) { return; } dr = gdTrueColorGetRed(color); dg = gdTrueColorGetGreen(color); db = gdTrueColorGetBlue(color); r = gdTrueColorGetRed(p); g = gdTrueColorGetGreen(p); b = 
gdTrueColorGetBlue(p); BLEND_COLOR(t, dr, r, dr); BLEND_COLOR(t, dg, g, dg); BLEND_COLOR(t, db, b, db); im->tpixels[y][x] = gdTrueColorAlpha(dr, dg, db, gdAlphaOpaque); } static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col) { /* keep them as 32bits */ long x, y, inc, frac; long dx, dy,tmp; int w, wid, wstart; int thick = im->thick; if (!im->trueColor) { /* TBB: don't crash when the image is of the wrong type */ gdImageLine(im, x1, y1, x2, y2, col); return; } /* TBB: use the clipping rectangle */ if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0) return; if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0) return; dx = x2 - x1; dy = y2 - y1; if (dx == 0 && dy == 0) { /* TBB: allow setting points */ gdImageSetAAPixelColor(im, x1, y1, col, 0xFF); return; } else { double ag; /* Cast the long to an int to avoid compiler warnings about truncation. * This isn't a problem as computed dy/dx values came from ints above. */ ag = fabs(abs((int)dy) < abs((int)dx) ? cos(atan2(dy, dx)) : sin(atan2(dy, dx))); if (ag != 0) { wid = thick / ag; } else { wid = 1; } if (wid == 0) { wid = 1; } } /* Axis aligned lines */ if (dx == 0) { gdImageVLine(im, x1, y1, y2, col); return; } else if (dy == 0) { gdImageHLine(im, y1, x1, x2, col); return; } if (abs((int)dx) > abs((int)dy)) { if (dx < 0) { tmp = x1; x1 = x2; x2 = tmp; tmp = y1; y1 = y2; y2 = tmp; dx = x2 - x1; dy = y2 - y1; } y = y1; inc = (dy * 65536) / dx; frac = 0; /* TBB: set the last pixel for consistency (<=) */ for (x = x1 ; x <= x2 ; x++) { wstart = y - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetAAPixelColor(im, x , w , col , (frac >> 8) & 0xFF); gdImageSetAAPixelColor(im, x , w + 1 , col, (~frac >> 8) & 0xFF); } frac += inc; if (frac >= 65536) { frac -= 65536; y++; } else if (frac < 0) { frac += 65536; y--; } } } else { if (dy < 0) { tmp = x1; x1 = x2; x2 = tmp; tmp = y1; y1 = y2; y2 = tmp; dx = x2 - x1; dy = y2 - y1; } x = x1; inc = (dx * 65536) / dy; frac = 0; /* TBB: set the last pixel for consistency (<=) */ for (y = y1 ; y <= y2 ; y++) { wstart = x - wid / 2; for (w = wstart; w < wstart + wid; w++) { gdImageSetAAPixelColor(im, w , y , col, (frac >> 8) & 0xFF); gdImageSetAAPixelColor(im, w + 1, y, col, (~frac >> 8) & 0xFF); } frac += inc; if (frac >= 65536) { frac -= 65536; x++; } else if (frac < 0) { frac += 65536; x--; } } } } /* convert a palette image to true color */ BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src) { unsigned int y; unsigned int yy; if (src == NULL) { return 0; } if (src->trueColor == 1) { return 1; } else { unsigned int x; const unsigned int sy = gdImageSY(src); const unsigned int sx = gdImageSX(src); src->tpixels = (int **) gdMalloc(sizeof(int *) * sy); if (src->tpixels == NULL) { return 0; } for (y = 0; y < sy; y++) { const unsigned char *src_row = src->pixels[y]; int * dst_row; /* no need to calloc it, we overwrite all pxl anyway */ src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int)); if (src->tpixels[y] == NULL) { goto clean_on_error; } dst_row = src->tpixels[y]; for (x = 0; x < sx; x++) { const unsigned char c = *(src_row + x); if (c == src->transparent) { *(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127); } else { *(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]); } } } } /* free old palette buffer (y is sy) */ for (yy = 0; yy < y; yy++) { gdFree(src->pixels[yy]); } gdFree(src->pixels); src->trueColor = 1; src->pixels = NULL; src->alphaBlendingFlag = 0; src->saveAlphaFlag = 1; return 1; clean_on_error: /* free 
the partially built true color buffer (row y itself was never
	   allocated, since its allocation failed) */
	for (yy = 0; yy < y; yy++) {
		gdFree(src->tpixels[yy]);
	}
	gdFree(src->tpixels);
	return 0;
}
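/*
 * A minimal standalone sketch of the weighted blend that gdAlphaBlend()
 * above performs, shown with two hand-picked pixels.  It assumes gd's
 * alpha convention (0 = opaque, 127 = fully transparent) and re-derives
 * the channel extraction locally; everything prefixed demo_ is
 * illustrative and not part of the gd API.
 */
#include <stdio.h>

#define DEMO_ALPHA_MAX 127

#define DEMO_GET_A(c) (((c) >> 24) & 0x7F)
#define DEMO_GET_R(c) (((c) >> 16) & 0xFF)
#define DEMO_GET_G(c) (((c) >> 8) & 0xFF)
#define DEMO_GET_B(c) ((c) & 0xFF)

static int demo_alpha_blend (int dst, int src)
{
	int src_a = DEMO_GET_A (src);
	int dst_a = DEMO_GET_A (dst);
	int src_w, dst_w, tot_w, a, r, g, b;

	/* The three fast paths mirror the ones in gdAlphaBlend(). */
	if (src_a == 0)
		return src;	/* source fully opaque: it wins outright */
	if (src_a == DEMO_ALPHA_MAX)
		return dst;	/* source fully transparent: keep destination */
	if (dst_a == DEMO_ALPHA_MAX)
		return src;	/* destination fully transparent: take source */

	/* Destination weight shrinks as the overlay becomes more opaque. */
	src_w = DEMO_ALPHA_MAX - src_a;
	dst_w = (DEMO_ALPHA_MAX - dst_a) * src_a / DEMO_ALPHA_MAX;
	tot_w = src_w + dst_w;

	a = src_a * dst_a / DEMO_ALPHA_MAX;
	r = (DEMO_GET_R (src) * src_w + DEMO_GET_R (dst) * dst_w) / tot_w;
	g = (DEMO_GET_G (src) * src_w + DEMO_GET_G (dst) * dst_w) / tot_w;
	b = (DEMO_GET_B (src) * src_w + DEMO_GET_B (dst) * dst_w) / tot_w;

	return (a << 24) + (r << 16) + (g << 8) + b;
}

int main (void)
{
	int dst = (0 << 24) | (0xFF << 16);	/* opaque red */
	int src = (64 << 24) | 0xFF;		/* half-transparent blue */

	/* Prints 0x0080007E: alpha 0 (opaque), red 128, blue 126. */
	printf ("blended: 0x%08X\n", demo_alpha_blend (dst, src));
	return 0;
}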
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5512_0
crossvul-cpp_data_good_339_2
/* * card-muscle.c: Support for MuscleCard Applet from musclecard.com * * Copyright (C) 2006, Identity Alliance, Thomas Harning <support@identityalliance.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include "internal.h" #include "cardctl.h" #include "muscle.h" #include "muscle-filesystem.h" #include "types.h" #include "opensc.h" static struct sc_card_operations muscle_ops; static const struct sc_card_operations *iso_ops = NULL; static struct sc_card_driver muscle_drv = { "MuscleApplet", "muscle", &muscle_ops, NULL, 0, NULL }; static struct sc_atr_table muscle_atrs[] = { /* Tyfone JCOP 242R2 cards */ { "3b:6d:00:00:ff:54:79:66:6f:6e:65:20:32:34:32:52:32", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP242R2_NO_EXT_APDU, 0, NULL }, /* Aladdin eToken PRO USB 72K Java */ { "3b:d5:18:00:81:31:3a:7d:80:73:c8:21:10:30", NULL, NULL, SC_CARD_TYPE_MUSCLE_ETOKEN_72K, 0, NULL }, /* JCOP31 v2.4.1 contact interface */ { "3b:f8:13:00:00:81:31:fe:45:4a:43:4f:50:76:32:34:31:b7", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP241, 0, NULL }, /* JCOP31 v2.4.1 RF interface */ { "3b:88:80:01:4a:43:4f:50:76:32:34:31:5e", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP241, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; #define MUSCLE_DATA(card) ( (muscle_private_t*)card->drv_data ) #define MUSCLE_FS(card) ( ((muscle_private_t*)card->drv_data)->fs ) typedef struct muscle_private { sc_security_env_t env; unsigned short verifiedPins; mscfs_t *fs; int rsa_key_ref; } muscle_private_t; static int muscle_finish(sc_card_t *card) { muscle_private_t *priv = MUSCLE_DATA(card); mscfs_free(priv->fs); free(priv); return 0; } static u8 muscleAppletId[] = { 0xA0, 0x00,0x00,0x00, 0x01, 0x01 }; static int muscle_match_card(sc_card_t *card) { sc_apdu_t apdu; u8 response[64]; int r; /* Since we send an APDU, the card's logout function may be called... * however it's not always properly nulled out... */ card->ops->logout = NULL; if (msc_select_applet(card, muscleAppletId, sizeof muscleAppletId) == 1) { /* Muscle applet is present, check the protocol version to be sure */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2, 0x3C, 0x00, 0x00); apdu.cla = 0xB0; apdu.le = 64; apdu.resplen = 64; apdu.resp = response; r = sc_transmit_apdu(card, &apdu); if (r == SC_SUCCESS && response[0] == 0x01) { card->type = SC_CARD_TYPE_MUSCLE_V1; } else { card->type = SC_CARD_TYPE_MUSCLE_GENERIC; } return 1; } return 0; } /* Since Musclecard has a different ACL system then PKCS15 * objects need to have their READ/UPDATE/DELETE permissions mapped for files * and directory ACLS need to be set * For keys.. 
they have different ACLS, but are accessed in different locations, so it shouldn't be an issue here */ static unsigned short muscle_parse_singleAcl(const sc_acl_entry_t* acl) { unsigned short acl_entry = 0; while(acl) { int key = acl->key_ref; int method = acl->method; switch(method) { case SC_AC_NEVER: return 0xFFFF; /* Ignore... other items overwrite these */ case SC_AC_NONE: case SC_AC_UNKNOWN: break; case SC_AC_CHV: acl_entry |= (1 << key); /* Assuming key 0 == SO */ break; case SC_AC_AUT: case SC_AC_TERM: case SC_AC_PRO: default: /* Ignored */ break; } acl = acl->next; } return acl_entry; } static void muscle_parse_acls(const sc_file_t* file, unsigned short* read_perm, unsigned short* write_perm, unsigned short* delete_perm) { assert(read_perm && write_perm && delete_perm); *read_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_READ)); *write_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_UPDATE)); *delete_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_DELETE)); } static int muscle_create_directory(sc_card_t *card, sc_file_t *file) { mscfs_t *fs = MUSCLE_FS(card); msc_id objectId; u8* oid = objectId.id; unsigned id = file->id; unsigned short read_perm = 0, write_perm = 0, delete_perm = 0; int objectSize; int r; if(id == 0) /* No null name files */ return SC_ERROR_INVALID_ARGUMENTS; /* No nesting directories */ if(fs->currentPath[0] != 0x3F || fs->currentPath[1] != 0x00) return SC_ERROR_NOT_SUPPORTED; oid[0] = ((id & 0xFF00) >> 8) & 0xFF; oid[1] = id & 0xFF; oid[2] = oid[3] = 0; objectSize = file->size; muscle_parse_acls(file, &read_perm, &write_perm, &delete_perm); r = msc_create_object(card, objectId, objectSize, read_perm, write_perm, delete_perm); mscfs_clear_cache(fs); if(r >= 0) return 0; return r; } static int muscle_create_file(sc_card_t *card, sc_file_t *file) { mscfs_t *fs = MUSCLE_FS(card); int objectSize = file->size; unsigned short read_perm = 0, write_perm = 0, delete_perm = 0; msc_id objectId; int r; if(file->type == SC_FILE_TYPE_DF) return muscle_create_directory(card, file); if(file->type != SC_FILE_TYPE_WORKING_EF) return SC_ERROR_NOT_SUPPORTED; if(file->id == 0) /* No null name files */ return SC_ERROR_INVALID_ARGUMENTS; muscle_parse_acls(file, &read_perm, &write_perm, &delete_perm); mscfs_lookup_local(fs, file->id, &objectId); r = msc_create_object(card, objectId, objectSize, read_perm, write_perm, delete_perm); mscfs_clear_cache(fs); if(r >= 0) return 0; return r; } static int muscle_read_binary(sc_card_t *card, unsigned int idx, u8* buf, size_t count, unsigned long flags) { mscfs_t *fs = MUSCLE_FS(card); int r; msc_id objectId; u8* oid = objectId.id; mscfs_file_t *file; r = mscfs_check_selection(fs, -1); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); file = &fs->cache.array[fs->currentFileIndex]; objectId = file->objectId; /* memcpy(objectId.id, file->objectId.id, 4); */ if(!file->ef) { oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; } r = msc_read_object(card, objectId, idx, buf, count); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } static int muscle_update_binary(sc_card_t *card, unsigned int idx, const u8* buf, size_t count, unsigned long flags) { mscfs_t *fs = MUSCLE_FS(card); int r; mscfs_file_t *file; msc_id objectId; u8* oid = objectId.id; r = mscfs_check_selection(fs, -1); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); file = &fs->cache.array[fs->currentFileIndex]; objectId = file->objectId; /* memcpy(objectId.id, file->objectId.id, 4); */ if(!file->ef) { 
oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; } if(file->size < idx + count) { int newFileSize = idx + count; u8* buffer = malloc(newFileSize); if(buffer == NULL) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); r = msc_read_object(card, objectId, 0, buffer, file->size); /* TODO: RETRIEVE ACLS */ if(r < 0) goto update_bin_free_buffer; r = msc_delete_object(card, objectId, 0); if(r < 0) goto update_bin_free_buffer; r = msc_create_object(card, objectId, newFileSize, 0,0,0); if(r < 0) goto update_bin_free_buffer; memcpy(buffer + idx, buf, count); r = msc_update_object(card, objectId, 0, buffer, newFileSize); if(r < 0) goto update_bin_free_buffer; file->size = newFileSize; update_bin_free_buffer: free(buffer); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } else { r = msc_update_object(card, objectId, idx, buf, count); } /* mscfs_clear_cache(fs); */ return r; } /* TODO: Evaluate correctness */ static int muscle_delete_mscfs_file(sc_card_t *card, mscfs_file_t *file_data) { mscfs_t *fs = MUSCLE_FS(card); msc_id id = file_data->objectId; u8* oid = id.id; int r; if(!file_data->ef) { int x; mscfs_file_t *childFile; /* Delete children */ mscfs_check_cache(fs); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "DELETING Children of: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); for(x = 0; x < fs->cache.size; x++) { msc_id objectId; childFile = &fs->cache.array[x]; objectId = childFile->objectId; if(0 == memcmp(oid + 2, objectId.id, 2)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "DELETING: %02X%02X%02X%02X\n", objectId.id[0],objectId.id[1], objectId.id[2],objectId.id[3]); r = muscle_delete_mscfs_file(card, childFile); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } } oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; /* ??? objectId = objectId >> 16; */ } if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4)) || (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) { } r = msc_delete_object(card, id, 1); /* Check if its the root... this file generally is virtual * So don't return an error if it fails */ if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4)) || (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) return 0; if(r < 0) { printf("ID: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } return 0; } static int muscle_delete_file(sc_card_t *card, const sc_path_t *path_in) { mscfs_t *fs = MUSCLE_FS(card); mscfs_file_t *file_data = NULL; int r = 0; r = mscfs_loadFileInfo(fs, path_in->value, path_in->len, &file_data, NULL); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); r = muscle_delete_mscfs_file(card, file_data); mscfs_clear_cache(fs); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); return 0; } static void muscle_load_single_acl(sc_file_t* file, int operation, unsigned short acl) { int key; /* Everybody by default.... 
*/ sc_file_add_acl_entry(file, operation, SC_AC_NONE, 0); if(acl == 0xFFFF) { sc_file_add_acl_entry(file, operation, SC_AC_NEVER, 0); return; } for(key = 0; key < 16; key++) { if(acl >> key & 1) { sc_file_add_acl_entry(file, operation, SC_AC_CHV, key); } } } static void muscle_load_file_acls(sc_file_t* file, mscfs_file_t *file_data) { muscle_load_single_acl(file, SC_AC_OP_READ, file_data->read); muscle_load_single_acl(file, SC_AC_OP_WRITE, file_data->write); muscle_load_single_acl(file, SC_AC_OP_UPDATE, file_data->write); muscle_load_single_acl(file, SC_AC_OP_DELETE, file_data->delete); } static void muscle_load_dir_acls(sc_file_t* file, mscfs_file_t *file_data) { muscle_load_single_acl(file, SC_AC_OP_SELECT, 0); muscle_load_single_acl(file, SC_AC_OP_LIST_FILES, 0); muscle_load_single_acl(file, SC_AC_OP_LOCK, 0xFFFF); muscle_load_single_acl(file, SC_AC_OP_DELETE, file_data->delete); muscle_load_single_acl(file, SC_AC_OP_CREATE, file_data->write); } /* Required type = -1 for don't care, 1 for EF, 0 for DF */ static int select_item(sc_card_t *card, const sc_path_t *path_in, sc_file_t ** file_out, int requiredType) { mscfs_t *fs = MUSCLE_FS(card); mscfs_file_t *file_data = NULL; int pathlen = path_in->len; int r = 0; int objectIndex; u8* oid; mscfs_check_cache(fs); r = mscfs_loadFileInfo(fs, path_in->value, path_in->len, &file_data, &objectIndex); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); /* Check if its the right type */ if(requiredType >= 0 && requiredType != file_data->ef) { SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } oid = file_data->objectId.id; /* Is it a file or directory */ if(file_data->ef) { fs->currentPath[0] = oid[0]; fs->currentPath[1] = oid[1]; fs->currentFile[0] = oid[2]; fs->currentFile[1] = oid[3]; } else { fs->currentPath[0] = oid[pathlen - 2]; fs->currentPath[1] = oid[pathlen - 1]; fs->currentFile[0] = 0; fs->currentFile[1] = 0; } fs->currentFileIndex = objectIndex; if(file_out) { sc_file_t *file; file = sc_file_new(); file->path = *path_in; file->size = file_data->size; file->id = (oid[2] << 8) | oid[3]; if(!file_data->ef) { file->type = SC_FILE_TYPE_DF; } else { file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; } /* Setup ACLS */ if(file_data->ef) { muscle_load_file_acls(file, file_data); } else { muscle_load_dir_acls(file, file_data); /* Setup directory acls... */ } file->magic = SC_FILE_MAGIC; *file_out = file; } return 0; } static int muscle_select_file(sc_card_t *card, const sc_path_t *path_in, sc_file_t **file_out) { int r; assert(card != NULL && path_in != NULL); switch (path_in->type) { case SC_PATH_TYPE_FILE_ID: r = select_item(card, path_in, file_out, 1); break; case SC_PATH_TYPE_DF_NAME: r = select_item(card, path_in, file_out, 0); break; case SC_PATH_TYPE_PATH: r = select_item(card, path_in, file_out, -1); break; default: SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if(r > 0) r = 0; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } static int _listFile(mscfs_file_t *file, int reset, void *udata) { int next = reset ? 
0x00 : 0x01; return msc_list_objects( (sc_card_t*)udata, next, file); } static int muscle_init(sc_card_t *card) { muscle_private_t *priv; card->name = "MuscleApplet"; card->drv_data = malloc(sizeof(muscle_private_t)); if(!card->drv_data) { SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); } memset(card->drv_data, 0, sizeof(muscle_private_t)); priv = MUSCLE_DATA(card); priv->verifiedPins = 0; priv->fs = mscfs_new(); if(!priv->fs) { free(card->drv_data); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); } priv->fs->udata = card; priv->fs->listFile = _listFile; card->cla = 0xB0; card->flags |= SC_CARD_FLAG_RNG; card->caps |= SC_CARD_CAP_RNG; /* Card type detection */ _sc_match_atr(card, muscle_atrs, &card->type); if(card->type == SC_CARD_TYPE_MUSCLE_ETOKEN_72K) { card->caps |= SC_CARD_CAP_APDU_EXT; } if(card->type == SC_CARD_TYPE_MUSCLE_JCOP241) { card->caps |= SC_CARD_CAP_APDU_EXT; } if (!(card->caps & SC_CARD_CAP_APDU_EXT)) { card->max_recv_size = 255; card->max_send_size = 255; } if(card->type == SC_CARD_TYPE_MUSCLE_JCOP242R2_NO_EXT_APDU) { /* Tyfone JCOP v242R2 card that doesn't support extended APDUs */ } /* FIXME: Card type detection */ if (1) { unsigned long flags; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_HASH_NONE; flags |= SC_ALGORITHM_ONBOARD_KEY_GEN; _sc_card_add_rsa_alg(card, 1024, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return SC_SUCCESS; } static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen) { muscle_private_t* priv = MUSCLE_DATA(card); mscfs_t *fs = priv->fs; int x; int count = 0; mscfs_check_cache(priv->fs); for(x = 0; x < fs->cache.size; x++) { u8* oid = fs->cache.array[x].objectId.id; if (bufLen < 2) break; sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "FILE: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); if(0 == memcmp(fs->currentPath, oid, 2)) { buf[0] = oid[2]; buf[1] = oid[3]; if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */ buf += 2; count += 2; bufLen -= 2; } } return count; } static int muscle_pin_cmd(sc_card_t *card, struct sc_pin_cmd_data *cmd, int *tries_left) { muscle_private_t* priv = MUSCLE_DATA(card); const int bufferLength = MSC_MAX_PIN_COMMAND_LENGTH; u8 buffer[MSC_MAX_PIN_COMMAND_LENGTH]; switch(cmd->cmd) { case SC_PIN_CMD_VERIFY: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; int r; msc_verify_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len); cmd->apdu = &apdu; cmd->pin1.offset = 5; r = iso_ops->pin_cmd(card, cmd, tries_left); if(r >= 0) priv->verifiedPins |= (1 << cmd->pin_reference); return r; } case SC_AC_TERM: case SC_AC_PRO: case SC_AC_AUT: case SC_AC_NONE: default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n"); return SC_ERROR_NOT_SUPPORTED; } case SC_PIN_CMD_CHANGE: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; msc_change_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len, cmd->pin2.data, cmd->pin2.len); cmd->apdu = &apdu; return iso_ops->pin_cmd(card, cmd, tries_left); } case SC_AC_TERM: case SC_AC_PRO: case SC_AC_AUT: case SC_AC_NONE: default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n"); return SC_ERROR_NOT_SUPPORTED; } case SC_PIN_CMD_UNBLOCK: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; msc_unblock_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len); cmd->apdu = &apdu; return 
			iso_ops->pin_cmd(card, cmd, tries_left);
		}
		case SC_AC_TERM:
		case SC_AC_PRO:
		case SC_AC_AUT:
		case SC_AC_NONE:
		default:
			sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n");
			return SC_ERROR_NOT_SUPPORTED;
		}
	default:
		sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported command\n");
		return SC_ERROR_NOT_SUPPORTED;
	}
}

static int muscle_card_extract_key(sc_card_t *card, sc_cardctl_muscle_key_info_t *info)
{
	/* CURRENTLY DON'T SUPPORT EXTRACTING PRIVATE KEYS... */
	switch(info->keyType) {
	case 1: /* RSA */
		return msc_extract_rsa_public_key(card,
			info->keyLocation,
			&info->modLength,
			&info->modValue,
			&info->expLength,
			&info->expValue);
	default:
		return SC_ERROR_NOT_SUPPORTED;
	}
}

static int muscle_card_import_key(sc_card_t *card, sc_cardctl_muscle_key_info_t *info)
{
	/* Only RSA private key import is supported for now */
	switch(info->keyType) {
	case 0x02: /* RSA_PRIVATE */
	case 0x03: /* RSA_PRIVATE_CRT */
		return msc_import_key(card, info->keyLocation, info);
	default:
		return SC_ERROR_NOT_SUPPORTED;
	}
}

static int muscle_card_generate_key(sc_card_t *card, sc_cardctl_muscle_gen_key_info_t *info)
{
	return msc_generate_keypair(card,
		info->privateKeyLocation,
		info->publicKeyLocation,
		info->keyType,
		info->keySize,
		0);
}

static int muscle_card_verified_pins(sc_card_t *card, sc_cardctl_muscle_verified_pins_info_t *info)
{
	muscle_private_t* priv = MUSCLE_DATA(card);
	info->verifiedPins = priv->verifiedPins;
	return 0;
}

static int muscle_card_ctl(sc_card_t *card, unsigned long request, void *data)
{
	switch(request) {
	case SC_CARDCTL_MUSCLE_GENERATE_KEY:
		return muscle_card_generate_key(card, (sc_cardctl_muscle_gen_key_info_t*) data);
	case SC_CARDCTL_MUSCLE_EXTRACT_KEY:
		return muscle_card_extract_key(card, (sc_cardctl_muscle_key_info_t*) data);
	case SC_CARDCTL_MUSCLE_IMPORT_KEY:
		return muscle_card_import_key(card, (sc_cardctl_muscle_key_info_t*) data);
	case SC_CARDCTL_MUSCLE_VERIFIED_PINS:
		return muscle_card_verified_pins(card, (sc_cardctl_muscle_verified_pins_info_t*) data);
	default:
		return SC_ERROR_NOT_SUPPORTED; /* Unsupported..
whatever it is */ } } static int muscle_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { muscle_private_t* priv = MUSCLE_DATA(card); if (env->operation != SC_SEC_OPERATION_SIGN && env->operation != SC_SEC_OPERATION_DECIPHER) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid crypto operation supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } if (env->algorithm != SC_ALGORITHM_RSA) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid crypto algorithm supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } /* ADJUST FOR PKCS1 padding support for decryption only */ if ((env->algorithm_flags & SC_ALGORITHM_RSA_PADS) || (env->algorithm_flags & SC_ALGORITHM_RSA_HASHES)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Card supports only raw RSA.\n"); return SC_ERROR_NOT_SUPPORTED; } if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { if (env->key_ref_len != 1 || (env->key_ref[0] > 0x0F)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid key reference supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } priv->rsa_key_ref = env->key_ref[0]; } if (env->flags & SC_SEC_ENV_ALG_REF_PRESENT) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Algorithm reference not supported.\n"); return SC_ERROR_NOT_SUPPORTED; } /* if (env->flags & SC_SEC_ENV_FILE_REF_PRESENT) if (memcmp(env->file_ref.value, "\x00\x12", 2) != 0) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "File reference is not 0012.\n"); return SC_ERROR_NOT_SUPPORTED; } */ priv->env = *env; return 0; } static int muscle_restore_security_env(sc_card_t *card, int se_num) { muscle_private_t* priv = MUSCLE_DATA(card); memset(&priv->env, 0, sizeof(priv->env)); return 0; } static int muscle_decipher(sc_card_t * card, const u8 * crgram, size_t crgram_len, u8 * out, size_t out_len) { muscle_private_t* priv = MUSCLE_DATA(card); u8 key_id; int r; /* sanity check */ if (priv->env.operation != SC_SEC_OPERATION_DECIPHER) return SC_ERROR_INVALID_ARGUMENTS; key_id = priv->rsa_key_ref * 2; /* Private key */ if (out_len < crgram_len) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Output buffer too small"); return SC_ERROR_BUFFER_TOO_SMALL; } r = msc_compute_crypt(card, key_id, 0x00, /* RSA NO PADDING */ 0x04, /* decrypt */ crgram, out, crgram_len, out_len); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card signature failed"); return r; } static int muscle_compute_signature(sc_card_t *card, const u8 *data, size_t data_len, u8 * out, size_t outlen) { muscle_private_t* priv = MUSCLE_DATA(card); u8 key_id; int r; key_id = priv->rsa_key_ref * 2; /* Private key */ if (outlen < data_len) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Output buffer too small"); return SC_ERROR_BUFFER_TOO_SMALL; } r = msc_compute_crypt(card, key_id, 0x00, /* RSA NO PADDING */ 0x04, /* -- decrypt raw... 
will do what we need since signing isn't yet supported */ data, out, data_len, outlen); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card signature failed"); return r; } static int muscle_get_challenge(sc_card_t *card, u8 *rnd, size_t len) { if (len == 0) return SC_SUCCESS; else { SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, msc_get_challenge(card, len, 0, NULL, rnd), "GET CHALLENGE cmd failed"); return (int) len; } } static int muscle_check_sw(sc_card_t * card, unsigned int sw1, unsigned int sw2) { if(sw1 == 0x9C) { switch(sw2) { case 0x01: /* SW_NO_MEMORY_LEFT */ return SC_ERROR_NOT_ENOUGH_MEMORY; case 0x02: /* SW_AUTH_FAILED */ return SC_ERROR_PIN_CODE_INCORRECT; case 0x03: /* SW_OPERATION_NOT_ALLOWED */ return SC_ERROR_NOT_ALLOWED; case 0x05: /* SW_UNSUPPORTED_FEATURE */ return SC_ERROR_NO_CARD_SUPPORT; case 0x06: /* SW_UNAUTHORIZED */ return SC_ERROR_SECURITY_STATUS_NOT_SATISFIED; case 0x07: /* SW_OBJECT_NOT_FOUND */ return SC_ERROR_FILE_NOT_FOUND; case 0x08: /* SW_OBJECT_EXISTS */ return SC_ERROR_FILE_ALREADY_EXISTS; case 0x09: /* SW_INCORRECT_ALG */ return SC_ERROR_INCORRECT_PARAMETERS; case 0x0B: /* SW_SIGNATURE_INVALID */ return SC_ERROR_CARD_CMD_FAILED; case 0x0C: /* SW_IDENTITY_BLOCKED */ return SC_ERROR_AUTH_METHOD_BLOCKED; case 0x0F: /* SW_INVALID_PARAMETER */ case 0x10: /* SW_INCORRECT_P1 */ case 0x11: /* SW_INCORRECT_P2 */ return SC_ERROR_INCORRECT_PARAMETERS; } } return iso_ops->check_sw(card, sw1, sw2); } static int muscle_card_reader_lock_obtained(sc_card_t *card, int was_reset) { int r = SC_SUCCESS; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (was_reset > 0) { if (msc_select_applet(card, muscleAppletId, sizeof muscleAppletId) != 1) { r = SC_ERROR_INVALID_CARD; } } LOG_FUNC_RETURN(card->ctx, r); } static struct sc_card_driver * sc_get_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; muscle_ops = *iso_drv->ops; muscle_ops.check_sw = muscle_check_sw; muscle_ops.pin_cmd = muscle_pin_cmd; muscle_ops.match_card = muscle_match_card; muscle_ops.init = muscle_init; muscle_ops.finish = muscle_finish; muscle_ops.get_challenge = muscle_get_challenge; muscle_ops.set_security_env = muscle_set_security_env; muscle_ops.restore_security_env = muscle_restore_security_env; muscle_ops.compute_signature = muscle_compute_signature; muscle_ops.decipher = muscle_decipher; muscle_ops.card_ctl = muscle_card_ctl; muscle_ops.read_binary = muscle_read_binary; muscle_ops.update_binary = muscle_update_binary; muscle_ops.create_file = muscle_create_file; muscle_ops.select_file = muscle_select_file; muscle_ops.delete_file = muscle_delete_file; muscle_ops.list_files = muscle_list_files; muscle_ops.card_reader_lock_obtained = muscle_card_reader_lock_obtained; return &muscle_drv; } struct sc_card_driver * sc_get_muscle_driver(void) { return sc_get_driver(); }
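/*
 * A small self-contained sketch of the 16-bit ACL words produced by
 * muscle_parse_singleAcl() above: each SC_AC_CHV entry sets bit
 * (1 << key_ref), 0x0000 means open access and 0xFFFF means "never".
 * The demo_ names and the key assignments (0 = SO PIN, 1 = user PIN)
 * follow the "Assuming key 0 == SO" remark in the driver but are
 * otherwise illustrative, not part of the driver itself.
 */
#include <stdio.h>

static const char *demo_describe_acl (unsigned short acl)
{
	if (acl == 0xFFFF)
		return "never allowed";
	if (acl == 0x0000)
		return "always allowed";
	return "requires a PIN";
}

int main (void)
{
	unsigned short read_perm   = 1 << 1;	/* user PIN (key ref 1) */
	unsigned short delete_perm = 1 << 0;	/* SO PIN (key ref 0) */
	unsigned short lock_perm   = 0xFFFF;	/* SC_AC_NEVER */

	printf ("read:   0x%04X -> %s\n", read_perm, demo_describe_acl (read_perm));
	printf ("delete: 0x%04X -> %s\n", delete_perm, demo_describe_acl (delete_perm));
	printf ("lock:   0x%04X -> %s\n", lock_perm, demo_describe_acl (lock_perm));
	return 0;
}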
./CrossVul/dataset_final_sorted/CWE-119/c/good_339_2
crossvul-cpp_data_good_339_6
/* * pkcs15-sc-hsm.c : Initialize PKCS#15 emulation * * Copyright (C) 2012 Andreas Schwier, CardContact, Minden, Germany * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #include "asn1.h" #include "common/compat_strlcpy.h" #include "common/compat_strnlen.h" #include "card-sc-hsm.h" extern struct sc_aid sc_hsm_aid; void sc_hsm_set_serialnr(sc_card_t *card, char *serial); static struct ec_curve curves[] = { { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x01", 8}, // secp192r1 aka prime192r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 24}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 24}, { (unsigned char *) "\x64\x21\x05\x19\xE5\x9C\x80\xE7\x0F\xA7\xE9\xAB\x72\x24\x30\x49\xFE\xB8\xDE\xEC\xC1\x46\xB9\xB1", 24}, { (unsigned char *) "\x04\x18\x8D\xA8\x0E\xB0\x30\x90\xF6\x7C\xBF\x20\xEB\x43\xA1\x88\x00\xF4\xFF\x0A\xFD\x82\xFF\x10\x12\x07\x19\x2B\x95\xFF\xC8\xDA\x78\x63\x10\x11\xED\x6B\x24\xCD\xD5\x73\xF9\x77\xA1\x1E\x79\x48\x11", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x99\xDE\xF8\x36\x14\x6B\xC9\xB1\xB4\xD2\x28\x31", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x07", 8}, // secp256r1 aka prime256r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 32}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 32}, { (unsigned char *) "\x5A\xC6\x35\xD8\xAA\x3A\x93\xE7\xB3\xEB\xBD\x55\x76\x98\x86\xBC\x65\x1D\x06\xB0\xCC\x53\xB0\xF6\x3B\xCE\x3C\x3E\x27\xD2\x60\x4B", 32}, { (unsigned char *) "\x04\x6B\x17\xD1\xF2\xE1\x2C\x42\x47\xF8\xBC\xE6\xE5\x63\xA4\x40\xF2\x77\x03\x7D\x81\x2D\xEB\x33\xA0\xF4\xA1\x39\x45\xD8\x98\xC2\x96\x4F\xE3\x42\xE2\xFE\x1A\x7F\x9B\x8E\xE7\xEB\x4A\x7C\x0F\x9E\x16\x2B\xCE\x33\x57\x6B\x31\x5E\xCE\xCB\xB6\x40\x68\x37\xBF\x51\xF5", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xBC\xE6\xFA\xAD\xA7\x17\x9E\x84\xF3\xB9\xCA\xC2\xFC\x63\x25\x51", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x03", 9}, // brainpoolP192r1 { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x30\x93\xD1\x8D\xB7\x8F\xCE\x47\x6D\xE1\xA8\x62\x97", 24}, { (unsigned char *) "\x6A\x91\x17\x40\x76\xB1\xE0\xE1\x9C\x39\xC0\x31\xFE\x86\x85\xC1\xCA\xE0\x40\xE5\xC6\x9A\x28\xEF", 24}, { (unsigned char *) 
"\x46\x9A\x28\xEF\x7C\x28\xCC\xA3\xDC\x72\x1D\x04\x4F\x44\x96\xBC\xCA\x7E\xF4\x14\x6F\xBF\x25\xC9", 24}, { (unsigned char *) "\x04\xC0\xA0\x64\x7E\xAA\xB6\xA4\x87\x53\xB0\x33\xC5\x6C\xB0\xF0\x90\x0A\x2F\x5C\x48\x53\x37\x5F\xD6\x14\xB6\x90\x86\x6A\xBD\x5B\xB8\x8B\x5F\x48\x28\xC1\x49\x00\x02\xE6\x77\x3F\xA2\xFA\x29\x9B\x8F", 49}, { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x2F\x9E\x9E\x91\x6B\x5B\xE8\xF1\x02\x9A\xC4\xAC\xC1", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x05", 9}, // brainpoolP224r1 { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD1\xD7\x87\xB0\x9F\x07\x57\x97\xDA\x89\xF5\x7E\xC8\xC0\xFF", 28}, { (unsigned char *) "\x68\xA5\xE6\x2C\xA9\xCE\x6C\x1C\x29\x98\x03\xA6\xC1\x53\x0B\x51\x4E\x18\x2A\xD8\xB0\x04\x2A\x59\xCA\xD2\x9F\x43", 28}, { (unsigned char *) "\x25\x80\xF6\x3C\xCF\xE4\x41\x38\x87\x07\x13\xB1\xA9\x23\x69\xE3\x3E\x21\x35\xD2\x66\xDB\xB3\x72\x38\x6C\x40\x0B", 28}, { (unsigned char *) "\x04\x0D\x90\x29\xAD\x2C\x7E\x5C\xF4\x34\x08\x23\xB2\xA8\x7D\xC6\x8C\x9E\x4C\xE3\x17\x4C\x1E\x6E\xFD\xEE\x12\xC0\x7D\x58\xAA\x56\xF7\x72\xC0\x72\x6F\x24\xC6\xB8\x9E\x4E\xCD\xAC\x24\x35\x4B\x9E\x99\xCA\xA3\xF6\xD3\x76\x14\x02\xCD", 57}, { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD0\xFB\x98\xD1\x16\xBC\x4B\x6D\xDE\xBC\xA3\xA5\xA7\x93\x9F", 28}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x07", 9}, // brainpoolP256r1 { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x72\x6E\x3B\xF6\x23\xD5\x26\x20\x28\x20\x13\x48\x1D\x1F\x6E\x53\x77", 32}, { (unsigned char *) "\x7D\x5A\x09\x75\xFC\x2C\x30\x57\xEE\xF6\x75\x30\x41\x7A\xFF\xE7\xFB\x80\x55\xC1\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9", 32}, { (unsigned char *) "\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9\xBB\xD7\x7C\xBF\x95\x84\x16\x29\x5C\xF7\xE1\xCE\x6B\xCC\xDC\x18\xFF\x8C\x07\xB6", 32}, { (unsigned char *) "\x04\x8B\xD2\xAE\xB9\xCB\x7E\x57\xCB\x2C\x4B\x48\x2F\xFC\x81\xB7\xAF\xB9\xDE\x27\xE1\xE3\xBD\x23\xC2\x3A\x44\x53\xBD\x9A\xCE\x32\x62\x54\x7E\xF8\x35\xC3\xDA\xC4\xFD\x97\xF8\x46\x1A\x14\x61\x1D\xC9\xC2\x77\x45\x13\x2D\xED\x8E\x54\x5C\x1D\x54\xC7\x2F\x04\x69\x97", 65}, { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x71\x8C\x39\x7A\xA3\xB5\x61\xA6\xF7\x90\x1E\x0E\x82\x97\x48\x56\xA7", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x09", 9}, // brainpoolP320r1 { (unsigned char *) "\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA6\xF6\xF4\x0D\xEF\x4F\x92\xB9\xEC\x78\x93\xEC\x28\xFC\xD4\x12\xB1\xF1\xB3\x2E\x27", 40}, { (unsigned char *) "\x3E\xE3\x0B\x56\x8F\xBA\xB0\xF8\x83\xCC\xEB\xD4\x6D\x3F\x3B\xB8\xA2\xA7\x35\x13\xF5\xEB\x79\xDA\x66\x19\x0E\xB0\x85\xFF\xA9\xF4\x92\xF3\x75\xA9\x7D\x86\x0E\xB4", 40}, { (unsigned char *) "\x52\x08\x83\x94\x9D\xFD\xBC\x42\xD3\xAD\x19\x86\x40\x68\x8A\x6F\xE1\x3F\x41\x34\x95\x54\xB4\x9A\xCC\x31\xDC\xCD\x88\x45\x39\x81\x6F\x5E\xB4\xAC\x8F\xB1\xF1\xA6", 40}, { (unsigned char *) "\x04\x43\xBD\x7E\x9A\xFB\x53\xD8\xB8\x52\x89\xBC\xC4\x8E\xE5\xBF\xE6\xF2\x01\x37\xD1\x0A\x08\x7E\xB6\xE7\x87\x1E\x2A\x10\xA5\x99\xC7\x10\xAF\x8D\x0D\x39\xE2\x06\x11\x14\xFD\xD0\x55\x45\xEC\x1C\xC8\xAB\x40\x93\x24\x7F\x77\x27\x5E\x07\x43\xFF\xED\x11\x71\x82\xEA\xA9\xC7\x78\x77\xAA\xAC\x6A\xC7\xD3\x52\x45\xD1\x69\x2E\x8E\xE1", 81}, { (unsigned char *) 
"\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA5\xB6\x8F\x12\xA3\x2D\x48\x2E\xC7\xEE\x86\x58\xE9\x86\x91\x55\x5B\x44\xC5\x93\x11", 40}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x1F", 5}, // secp192k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xEE\x37", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03", 24}, { (unsigned char *) "\x04\xDB\x4F\xF1\x0E\xC0\x57\xE9\xAE\x26\xB0\x7D\x02\x80\xB7\xF4\x34\x1D\xA5\xD1\xB1\xEA\xE0\x6C\x7D\x9B\x2F\x2F\x6D\x9C\x56\x28\xA7\x84\x41\x63\xD0\x15\xBE\x86\x34\x40\x82\xAA\x88\xD9\x5E\x2F\x9D", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\x26\xF2\xFC\x17\x0F\x69\x46\x6A\x74\xDE\xFD\x8D", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x0A", 5}, // secp256k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFC\x2F", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07", 32}, { (unsigned char *) "\x04\x79\xBE\x66\x7E\xF9\xDC\xBB\xAC\x55\xA0\x62\x95\xCE\x87\x0B\x07\x02\x9B\xFC\xDB\x2D\xCE\x28\xD9\x59\xF2\x81\x5B\x16\xF8\x17\x98\x48\x3A\xDA\x77\x26\xA3\xC4\x65\x5D\xA4\xFB\xFC\x0E\x11\x08\xA8\xFD\x17\xB4\x48\xA6\x85\x54\x19\x9C\x47\xD0\x8F\xFB\x10\xD4\xB8", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xBA\xAE\xDC\xE6\xAF\x48\xA0\x3B\xBF\xD2\x5E\x8C\xD0\x36\x41\x41", 32}, { (unsigned char *) "\x01", 1} }, { { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0} } }; #define C_ASN1_CVC_PUBKEY_SIZE 10 static const struct sc_asn1_entry c_asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE] = { { "publicKeyOID", SC_ASN1_OBJECT, SC_ASN1_UNI | SC_ASN1_OBJECT, 0, NULL, NULL }, { "primeOrModulus", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 1, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientAorExponent", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 2, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientB", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 3, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "basePointG", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 4, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "order", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 5, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "publicPoint", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 6, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "cofactor", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 7, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "modulusSize", SC_ASN1_INTEGER, SC_ASN1_UNI | SC_ASN1_INTEGER, SC_ASN1_OPTIONAL, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_BODY_SIZE 5 static const struct sc_asn1_entry c_asn1_cvc_body[C_ASN1_CVC_BODY_SIZE] = { { "certificateProfileIdentifier", SC_ASN1_INTEGER, SC_ASN1_APP | 0x1F29, 0, NULL, NULL }, { "certificationAuthorityReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "publicKey", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F49, 0, 
NULL, NULL }, { "certificateHolderReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 0x1F20, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVCERT_SIZE 3 static const struct sc_asn1_entry c_asn1_cvcert[C_ASN1_CVCERT_SIZE] = { { "certificateBody", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F4E, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_SIZE 2 static const struct sc_asn1_entry c_asn1_cvc[C_ASN1_CVC_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_AUTHREQ_SIZE 4 static const struct sc_asn1_entry c_asn1_authreq[C_ASN1_AUTHREQ_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { "outerCAR", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_REQ_SIZE 2 static const struct sc_asn1_entry c_asn1_req[C_ASN1_REQ_SIZE] = { { "authenticatedrequest", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 7, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; static int read_file(sc_pkcs15_card_t * p15card, u8 fid[2], u8 *efbin, size_t *len, int optional) { sc_path_t path; int r; sc_path_set(&path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); /* look this up with our AID */ path.aid = sc_hsm_aid; /* we don't have a pre-known size of the file */ path.count = -1; if (!p15card->opts.use_file_cache || !efbin || SC_SUCCESS != sc_pkcs15_read_cached_file(p15card, &path, &efbin, len)) { /* avoid re-selection of SC-HSM */ path.aid.len = 0; r = sc_select_file(p15card->card, &path, NULL); if (r < 0) { sc_log(p15card->card->ctx, "Could not select EF"); } else { r = sc_read_binary(p15card->card, 0, efbin, *len, 0); } if (r < 0) { sc_log(p15card->card->ctx, "Could not read EF"); if (!optional) { return r; } /* optional files are saved as empty files to avoid card * transactions. Parsing the file's data will reveal that they were * missing. */ *len = 0; } else { *len = r; } if (p15card->opts.use_file_cache) { /* save this with our AID */ path.aid = sc_hsm_aid; sc_pkcs15_cache_file(p15card, &path, efbin, *len); } } return SC_SUCCESS; } /* * Decode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_decode_cvc(sc_pkcs15_card_t * p15card, const u8 ** buf, size_t *buflen, sc_cvc_t *cvc) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_req[C_ASN1_REQ_SIZE]; struct sc_asn1_entry asn1_authreq[C_ASN1_AUTHREQ_SIZE]; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; unsigned int cla,tag; size_t taglen; size_t lenchr = sizeof(cvc->chr); size_t lencar = sizeof(cvc->car); size_t lenoutercar = sizeof(cvc->outer_car); const u8 *tbuf; int r; memset(cvc, 0, sizeof(*cvc)); sc_copy_asn1_entry(c_asn1_req, asn1_req); sc_copy_asn1_entry(c_asn1_authreq, asn1_authreq); sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 1, &cvc->primeOrModulus, &cvc->primeOrModuluslen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 2, &cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 3, &cvc->coefficientB, &cvc->coefficientBlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 4, &cvc->basePointG, &cvc->basePointGlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 5, &cvc->order, &cvc->orderlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 6, &cvc->publicPoint, &cvc->publicPointlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 7, &cvc->cofactor, &cvc->cofactorlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 0); sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 0); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 0); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 0); sc_format_asn1_entry(asn1_cvcert + 1, &cvc->signature, &cvc->signatureLen, 0); sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq + 1, &cvc->outer_car, &lenoutercar, 0); sc_format_asn1_entry(asn1_authreq + 2, &cvc->outerSignature, &cvc->outerSignatureLen, 0); sc_format_asn1_entry(asn1_req , &asn1_authreq, NULL, 0); /* sc_asn1_print_tags(*buf, *buflen); */ tbuf = *buf; r = sc_asn1_read_tag(&tbuf, *buflen, &cla, &tag, &taglen); LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); /* Determine if we deal with an authenticated request, plain request or certificate */ if ((cla == (SC_ASN1_TAG_APPLICATION|SC_ASN1_TAG_CONSTRUCTED)) && (tag == 7)) { r = sc_asn1_decode(card->ctx, asn1_req, *buf, *buflen, buf, buflen); } else { r = sc_asn1_decode(card->ctx, asn1_cvc, *buf, *buflen, buf, buflen); } LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Encode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_encode_cvc(sc_pkcs15_card_t * p15card, sc_cvc_t *cvc, u8 ** buf, size_t *buflen) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; size_t lenchr; size_t lencar; int r; sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); asn1_cvc_pubkey[1].flags = SC_ASN1_OPTIONAL; asn1_cvcert[1].flags = SC_ASN1_OPTIONAL; sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 1); if (cvc->primeOrModulus && (cvc->primeOrModuluslen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 1, cvc->primeOrModulus, &cvc->primeOrModuluslen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 2, cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 1); if (cvc->coefficientB && (cvc->coefficientBlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 3, cvc->coefficientB, &cvc->coefficientBlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 4, cvc->basePointG, &cvc->basePointGlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 5, cvc->order, &cvc->orderlen, 1); if (cvc->publicPoint && (cvc->publicPointlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 6, cvc->publicPoint, &cvc->publicPointlen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 7, cvc->cofactor, &cvc->cofactorlen, 1); } if (cvc->modulusSize > 0) { sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 1); } sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 1); lencar = strnlen(cvc->car, sizeof cvc->car); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 1); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 1); lenchr = strnlen(cvc->chr, sizeof cvc->chr); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 1); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 1); if (cvc->signature && (cvc->signatureLen > 0)) { sc_format_asn1_entry(asn1_cvcert + 1, cvc->signature, &cvc->signatureLen, 1); } sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 1); r = sc_asn1_encode(card->ctx, asn1_cvc, buf, buflen); LOG_TEST_RET(card->ctx, r, "Could not encode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_get_curve(struct ec_curve **curve, u8 *oid, size_t oidlen) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].oid.len == oidlen) && !memcmp(curves[i].oid.value, oid, oidlen)) { *curve = &curves[i]; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } int sc_pkcs15emu_sc_hsm_get_curve_oid(sc_cvc_t *cvc, const struct sc_lv_data **oid) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].prime.len == cvc->primeOrModuluslen) && !memcmp(curves[i].prime.value, cvc->primeOrModulus, cvc->primeOrModuluslen)) { *oid = &curves[i].oid; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } static int sc_pkcs15emu_sc_hsm_get_rsa_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { pubkey->algorithm = SC_ALGORITHM_RSA; pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) return SC_ERROR_OUT_OF_MEMORY; pubkey->alg_id->algorithm = SC_ALGORITHM_RSA; pubkey->u.rsa.modulus.len = cvc->primeOrModuluslen; pubkey->u.rsa.modulus.data = malloc(pubkey->u.rsa.modulus.len); pubkey->u.rsa.exponent.len = cvc->coefficientAorExponentlen; 
pubkey->u.rsa.exponent.data = malloc(pubkey->u.rsa.exponent.len); if (!pubkey->u.rsa.modulus.data || !pubkey->u.rsa.exponent.data) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.rsa.exponent.data, cvc->coefficientAorExponent, pubkey->u.rsa.exponent.len); memcpy(pubkey->u.rsa.modulus.data, cvc->primeOrModulus, pubkey->u.rsa.modulus.len); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_get_ec_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { struct sc_ec_parameters *ecp; const struct sc_lv_data *oid; int r; pubkey->algorithm = SC_ALGORITHM_EC; r = sc_pkcs15emu_sc_hsm_get_curve_oid(cvc, &oid); if (r != SC_SUCCESS) return r; ecp = calloc(1, sizeof(struct sc_ec_parameters)); if (!ecp) return SC_ERROR_OUT_OF_MEMORY; ecp->der.len = oid->len + 2; ecp->der.value = calloc(ecp->der.len, 1); if (!ecp->der.value) { free(ecp); return SC_ERROR_OUT_OF_MEMORY; } *(ecp->der.value + 0) = 0x06; *(ecp->der.value + 1) = (u8)oid->len; memcpy(ecp->der.value + 2, oid->value, oid->len); ecp->type = 1; // Named curve pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) { free(ecp->der.value); free(ecp); return SC_ERROR_OUT_OF_MEMORY; } pubkey->alg_id->algorithm = SC_ALGORITHM_EC; pubkey->alg_id->params = ecp; pubkey->u.ec.ecpointQ.value = malloc(cvc->publicPointlen); if (!pubkey->u.ec.ecpointQ.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.ecpointQ.value, cvc->publicPoint, cvc->publicPointlen); pubkey->u.ec.ecpointQ.len = cvc->publicPointlen; pubkey->u.ec.params.der.value = malloc(ecp->der.len); if (!pubkey->u.ec.params.der.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.params.der.value, ecp->der.value, ecp->der.len); pubkey->u.ec.params.der.len = ecp->der.len; /* FIXME: check return value? 
 */
	sc_pkcs15_fix_ec_parameters(ctx, &pubkey->u.ec.params);
	return SC_SUCCESS;
}


int sc_pkcs15emu_sc_hsm_get_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey)
{
	if (cvc->publicPoint && cvc->publicPointlen) {
		return sc_pkcs15emu_sc_hsm_get_ec_public_key(ctx, cvc, pubkey);
	} else {
		return sc_pkcs15emu_sc_hsm_get_rsa_public_key(ctx, cvc, pubkey);
	}
}


void sc_pkcs15emu_sc_hsm_free_cvc(sc_cvc_t *cvc)
{
	if (cvc->signature) {
		free(cvc->signature);
		cvc->signature = NULL;
	}
	if (cvc->primeOrModulus) {
		free(cvc->primeOrModulus);
		cvc->primeOrModulus = NULL;
	}
	if (cvc->coefficientAorExponent) {
		free(cvc->coefficientAorExponent);
		cvc->coefficientAorExponent = NULL;
	}
	if (cvc->coefficientB) {
		free(cvc->coefficientB);
		cvc->coefficientB = NULL;
	}
	if (cvc->basePointG) {
		free(cvc->basePointG);
		cvc->basePointG = NULL;
	}
	if (cvc->order) {
		free(cvc->order);
		cvc->order = NULL;
	}
	if (cvc->publicPoint) {
		free(cvc->publicPoint);
		cvc->publicPoint = NULL;
	}
	if (cvc->cofactor) {
		free(cvc->cofactor);
		cvc->cofactor = NULL;
	}
}


static int sc_pkcs15emu_sc_hsm_add_pubkey(sc_pkcs15_card_t *p15card, u8 *efbin, size_t len, sc_pkcs15_prkey_info_t *key_info, char *label)
{
	struct sc_context *ctx = p15card->card->ctx;
	sc_card_t *card = p15card->card;
	sc_pkcs15_pubkey_info_t pubkey_info;
	sc_pkcs15_object_t pubkey_obj;
	struct sc_pkcs15_pubkey pubkey;
	sc_cvc_t cvc;
	u8 *cvcpo;
	int r;

	cvcpo = efbin;
	memset(&cvc, 0, sizeof(cvc));
	r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&cvcpo, &len, &cvc);
	LOG_TEST_RET(ctx, r, "Could not decode certificate signing request");

	memset(&pubkey, 0, sizeof(pubkey));
	r = sc_pkcs15emu_sc_hsm_get_public_key(ctx, &cvc, &pubkey);
	LOG_TEST_RET(card->ctx, r, "Could not extract public key");

	memset(&pubkey_info, 0, sizeof(pubkey_info));
	memset(&pubkey_obj, 0, sizeof(pubkey_obj));

	r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_obj.content.value, &pubkey_obj.content.len);
	LOG_TEST_RET(ctx, r, "Could not encode public key");
	r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_info.direct.raw.value, &pubkey_info.direct.raw.len);
	LOG_TEST_RET(ctx, r, "Could not encode public key");
	r = sc_pkcs15_encode_pubkey_as_spki(ctx, &pubkey, &pubkey_info.direct.spki.value, &pubkey_info.direct.spki.len);
	LOG_TEST_RET(ctx, r, "Could not encode public key");

	pubkey_info.id = key_info->id;
	strlcpy(pubkey_obj.label, label, sizeof(pubkey_obj.label));

	if (pubkey.algorithm == SC_ALGORITHM_RSA) {
		pubkey_info.modulus_length = pubkey.u.rsa.modulus.len << 3;
		pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_ENCRYPT|SC_PKCS15_PRKEY_USAGE_VERIFY|SC_PKCS15_PRKEY_USAGE_WRAP;
		r = sc_pkcs15emu_add_rsa_pubkey(p15card, &pubkey_obj, &pubkey_info);
	} else {
		/* TODO: fix if support for curve sizes that are not a multiple of 8 bits is added */
		pubkey_info.field_length = cvc.primeOrModuluslen << 3;
		pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_VERIFY;
		r = sc_pkcs15emu_add_ec_pubkey(p15card, &pubkey_obj, &pubkey_info);
	}
	LOG_TEST_RET(ctx, r, "Could not add public key");

	sc_pkcs15emu_sc_hsm_free_cvc(&cvc);
	sc_pkcs15_erase_pubkey(&pubkey);

	return SC_SUCCESS;
}


/*
 * Add a key and the key description in PKCS#15 format to the framework
 */
static int sc_pkcs15emu_sc_hsm_add_prkd(sc_pkcs15_card_t * p15card, u8 keyid)
{
	sc_card_t *card = p15card->card;
	sc_pkcs15_cert_info_t cert_info;
	sc_pkcs15_object_t cert_obj;
	struct sc_pkcs15_object prkd;
	sc_pkcs15_prkey_info_t *key_info;
	u8 fid[2];
	/* enough to hold a complete certificate */
	u8 efbin[4096];
	u8 *ptr;
	size_t len;
	int r;

	fid[0] = PRKD_PREFIX;
	fid[1] = keyid;

	/* Try to select a related EF containing
the PKCS#15 description of the key */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); ptr = efbin; memset(&prkd, 0, sizeof(prkd)); r = sc_pkcs15_decode_prkdf_entry(p15card, &prkd, (const u8 **)&ptr, &len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); /* All keys require user PIN authentication */ prkd.auth_id.len = 1; prkd.auth_id.value[0] = 1; /* * Set private key flag as all keys are private anyway */ prkd.flags |= SC_PKCS15_CO_FLAG_PRIVATE; key_info = (sc_pkcs15_prkey_info_t *)prkd.data; key_info->key_reference = keyid; key_info->path.aid.len = 0; if (prkd.type == SC_PKCS15_TYPE_PRKEY_RSA) { r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkd, key_info); } else { r = sc_pkcs15emu_add_ec_prkey(p15card, &prkd, key_info); } LOG_TEST_RET(card->ctx, r, "Could not add private key to framework"); /* Check if we also have a certificate for the private key */ fid[0] = EE_CERTIFICATE_PREFIX; len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 0); LOG_TEST_RET(card->ctx, r, "Could not read EF"); if (efbin[0] == 0x67) { /* Decode CSR and create public key object */ sc_pkcs15emu_sc_hsm_add_pubkey(p15card, efbin, len, key_info, prkd.label); free(key_info); return SC_SUCCESS; /* Ignore any errors */ } if (efbin[0] != 0x30) { free(key_info); return SC_SUCCESS; } memset(&cert_info, 0, sizeof(cert_info)); memset(&cert_obj, 0, sizeof(cert_obj)); cert_info.id = key_info->id; sc_path_set(&cert_info.path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); cert_info.path.count = -1; if (p15card->opts.use_file_cache) { /* look this up with our AID, which should already be cached from the * call to `read_file`. This may have the side effect that OpenSC's * caching layer re-selects our applet *if the cached file cannot be * found/used* and we may lose the authentication status. We assume * that caching works perfectly without this side effect.
*/ cert_info.path.aid = sc_hsm_aid; } strlcpy(cert_obj.label, prkd.label, sizeof(cert_obj.label)); r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info); free(key_info); LOG_TEST_RET(card->ctx, r, "Could not add certificate"); return SC_SUCCESS; } /* * Add a data object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_dcod(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_data_info_t *data_info; sc_pkcs15_object_t data_obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = DCOD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the data */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.DCOD"); ptr = efbin; memset(&data_obj, 0, sizeof(data_obj)); r = sc_pkcs15_decode_dodf_entry(p15card, &data_obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Could not decode optional EF.DCOD"); data_info = (sc_pkcs15_data_info_t *)data_obj.data; r = sc_pkcs15emu_add_data_object(p15card, &data_obj, data_info); LOG_TEST_RET(card->ctx, r, "Could not add data object to framework"); return SC_SUCCESS; } /* * Add an unrelated certificate object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_cd(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_cert_info_t *cert_info; sc_pkcs15_object_t obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = CD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the certificate */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.CD"); ptr = efbin; memset(&obj, 0, sizeof(obj)); r = sc_pkcs15_decode_cdf_entry(p15card, &obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.CDOD"); cert_info = (sc_pkcs15_cert_info_t *)obj.data; r = sc_pkcs15emu_add_x509_cert(p15card, &obj, cert_info); LOG_TEST_RET(card->ctx, r, "Could not add certificate object to framework"); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_read_tokeninfo (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; int r; u8 efbin[512]; size_t len; LOG_FUNC_CALLED(card->ctx); /* Read token info */ len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x03", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); r = sc_pkcs15_parse_tokeninfo(card->ctx, p15card->tokeninfo, efbin, len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Initialize PKCS#15 emulation with user PIN, private keys, certificate and data objects * */ static int sc_pkcs15emu_sc_hsm_init (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; sc_hsm_private_data_t *priv = (sc_hsm_private_data_t *) card->drv_data; sc_file_t *file = NULL; sc_path_t path; u8 filelist[MAX_EXT_APDU_LENGTH]; int filelistlength; int r, i; sc_cvc_t devcert; struct sc_app_info *appinfo; struct sc_pkcs15_auth_info pin_info; struct sc_pkcs15_object pin_obj; struct sc_pin_cmd_data pindata; u8 efbin[1024]; u8 *ptr; size_t len; LOG_FUNC_CALLED(card->ctx); appinfo = calloc(1, sizeof(struct sc_app_info)); if (appinfo == NULL) { LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->aid = sc_hsm_aid; appinfo->ddo.aid = sc_hsm_aid; p15card->app = appinfo; sc_path_set(&path, SC_PATH_TYPE_DF_NAME, sc_hsm_aid.value, sc_hsm_aid.len, 0, 0); r = sc_select_file(card, &path, &file);
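/* Selecting by AID makes the SmartCard-HSM applet the currently selected
 * application. The FCI handed back in 'file' is only mined for the firmware
 * version below, so missing property attributes are tolerated. */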
LOG_TEST_RET(card->ctx, r, "Could not select SmartCard-HSM application"); p15card->card->version.hw_major = 24; /* JCOP 2.4.1r3 */ p15card->card->version.hw_minor = 13; if (file && file->prop_attr && file->prop_attr_len >= 2) { p15card->card->version.fw_major = file->prop_attr[file->prop_attr_len - 2]; p15card->card->version.fw_minor = file->prop_attr[file->prop_attr_len - 1]; } sc_file_free(file); /* Read device certificate to determine serial number */ if (priv->EF_C_DevAut && priv->EF_C_DevAut_len) { ptr = priv->EF_C_DevAut; len = priv->EF_C_DevAut_len; } else { len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x02", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.C_DevAut"); if (len > 0) { /* save EF_C_DevAut for further use */ ptr = realloc(priv->EF_C_DevAut, len); if (ptr) { memcpy(ptr, efbin, len); priv->EF_C_DevAut = ptr; priv->EF_C_DevAut_len = len; } } ptr = efbin; } memset(&devcert, 0 ,sizeof(devcert)); r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&ptr, &len, &devcert); LOG_TEST_RET(card->ctx, r, "Could not decode EF.C_DevAut"); sc_pkcs15emu_sc_hsm_read_tokeninfo(p15card); if (p15card->tokeninfo->label == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->label = strdup("GoID"); } else { p15card->tokeninfo->label = strdup("SmartCard-HSM"); } if (p15card->tokeninfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } if ((p15card->tokeninfo->manufacturer_id != NULL) && !strcmp("(unknown)", p15card->tokeninfo->manufacturer_id)) { free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = NULL; } if (p15card->tokeninfo->manufacturer_id == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->manufacturer_id = strdup("Bundesdruckerei GmbH"); } else { p15card->tokeninfo->manufacturer_id = strdup("www.CardContact.de"); } if (p15card->tokeninfo->manufacturer_id == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->label = strdup(p15card->tokeninfo->label); if (appinfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); len = strnlen(devcert.chr, sizeof devcert.chr); /* Strip last 5 digit sequence number from CHR */ assert(len >= 8); len -= 5; p15card->tokeninfo->serial_number = calloc(len + 1, 1); if (p15card->tokeninfo->serial_number == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(p15card->tokeninfo->serial_number, devcert.chr, len); *(p15card->tokeninfo->serial_number + len) = 0; sc_hsm_set_serialnr(card, p15card->tokeninfo->serial_number); sc_pkcs15emu_sc_hsm_free_cvc(&devcert); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 1; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x81; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_EXCHANGE_REF_DATA; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; pin_info.attrs.pin.min_length = 6; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 15; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 3; pin_info.max_tries = 3; pin_obj.auth_id.len = 1; pin_obj.auth_id.value[0] = 2; strlcpy(pin_obj.label, "UserPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE|SC_PKCS15_CO_FLAG_MODIFIABLE; r = 
sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 2; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x88; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_UNBLOCK_DISABLED|SC_PKCS15_PIN_FLAG_SO_PIN; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_BCD; pin_info.attrs.pin.min_length = 16; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 16; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 15; pin_info.max_tries = 15; strlcpy(pin_obj.label, "SOPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE; r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); if (card->type == SC_CARD_TYPE_SC_HSM_SOC || card->type == SC_CARD_TYPE_SC_HSM_GOID) { /* SC-HSM of this type always has a PIN-Pad */ r = SC_SUCCESS; } else { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x85; r = sc_pin_cmd(card, &pindata, NULL); } if (r == SC_ERROR_DATA_OBJECT_NOT_FOUND) { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x86; r = sc_pin_cmd(card, &pindata, NULL); } if ((r != SC_ERROR_DATA_OBJECT_NOT_FOUND) && (r != SC_ERROR_INCORRECT_PARAMETERS)) card->caps |= SC_CARD_CAP_PROTECTED_AUTHENTICATION_PATH; filelistlength = sc_list_files(card, filelist, sizeof(filelist)); LOG_TEST_RET(card->ctx, filelistlength, "Could not enumerate file and key identifier"); for (i = 0; i < filelistlength; i += 2) { switch(filelist[i]) { case KEY_PREFIX: r = sc_pkcs15emu_sc_hsm_add_prkd(p15card, filelist[i + 1]); break; case DCOD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_dcod(p15card, filelist[i + 1]); break; case CD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_cd(p15card, filelist[i + 1]); break; } if (r != SC_SUCCESS) { sc_log(card->ctx, "Error %d adding elements to framework", r); } } LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && (opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK)) { return sc_pkcs15emu_sc_hsm_init(p15card); } else { if (p15card->card->type != SC_CARD_TYPE_SC_HSM && p15card->card->type != SC_CARD_TYPE_SC_HSM_SOC && p15card->card->type != SC_CARD_TYPE_SC_HSM_GOID) { return SC_ERROR_WRONG_CARD; } return sc_pkcs15emu_sc_hsm_init(p15card); } }
./CrossVul/dataset_final_sorted/CWE-119/c/good_339_6
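/* Editorial note on the record above (good_339_6): in
 * sc_pkcs15emu_sc_hsm_get_rsa_public_key() the modulus and exponent buffers
 * are malloc'd back to back and NULL-checked only afterwards, so a failed
 * second allocation returns SC_ERROR_OUT_OF_MEMORY while alg_id and the
 * first buffer stay attached to the half-built pubkey. A minimal sketch of
 * an atomic copy helper -- dup_lv() is a hypothetical name, not OpenSC API: */
#include <stdlib.h>
#include <string.h>

static int dup_lv(const unsigned char *src, size_t len,
		unsigned char **dst, size_t *dstlen)
{
	if (!src || !len)
		return -1;		/* refuse empty input instead of malloc(0) */
	*dst = malloc(len);
	if (!*dst)
		return -1;
	memcpy(*dst, src, len);
	*dstlen = len;
	return 0;
}

/* Usage sketch: duplicate both halves first, free everything on any failure,
 * and only then attach the buffers to pubkey, so no partial state escapes. */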
crossvul-cpp_data_good_1050_3
/* Copyright (C) 2002-2012 by George Williams */ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <fontforge-config.h> #if !defined(_NO_FFSCRIPT) || !defined(_NO_PYTHON) #include "cvundoes.h" #include "fontforgeui.h" #include "gfile.h" #include "gkeysym.h" #include "gresource.h" #include "scriptfuncs.h" #include "scripting.h" #include "ustring.h" #include "utype.h" struct sd_data { int done; FontView *fv; SplineChar *sc; int layer; GWindow gw; int oldh; }; #define SD_Width 250 #define SD_Height 270 #define CID_Script 1001 #define CID_Box 1002 #define CID_OK 1003 #define CID_Call 1004 #define CID_Cancel 1005 #define CID_Python 1006 #define CID_FF 1007 static int SD_Call(GGadget *g, GEvent *e) { if ( e->type==et_controlevent && e->u.control.subtype == et_buttonactivate ) { char *fn; unichar_t *insert; fn = gwwv_open_filename(_("Call Script"), NULL, "*",NULL); if ( fn==NULL ) return(true); insert = malloc((strlen(fn)+10)*sizeof(unichar_t)); *insert = '"'; utf82u_strcpy(insert+1,fn); uc_strcat(insert,"\"()"); GTextFieldReplace(GWidgetGetControl(GGadgetGetWindow(g),CID_Script),insert); free(insert); free(fn); } return( true ); } #if !defined(_NO_FFSCRIPT) static void ExecNative(GGadget *g, GEvent *e) { struct sd_data *sd = GDrawGetUserData(GGadgetGetWindow(g)); Context c; Val args[1]; jmp_buf env; memset( &c,0,sizeof(c)); memset( args,0,sizeof(args)); running_script = true; c.a.argc = 1; c.a.vals = args; c.filename = args[0].u.sval = "ScriptDlg"; args[0].type = v_str; c.return_val.type = v_void; c.err_env = &env; c.curfv = (FontViewBase *) sd->fv; if ( setjmp(env)!=0 ) { running_script = false; return; /* Error return */ } c.script = GFileTmpfile(); if ( c.script==NULL ) ScriptError(&c, "Can't create temporary file"); else { const unichar_t *ret = _GGadgetGetTitle(GWidgetGetControl(sd->gw,CID_Script)); while ( *ret ) { /* There's a bug here. Filenames need to be converted to the local charset !!!! 
*/ putc(*ret,c.script); ++ret; } rewind(c.script); ff_VerboseCheck(); c.lineno = 1; while ( !c.returned && !c.broken && ff_NextToken(&c)!=tt_eof ) { ff_backuptok(&c); ff_statement(&c); } fclose(c.script); sd->done = true; } running_script = false; } #endif #if !defined(_NO_PYTHON) static void ExecPython(GGadget *g, GEvent *e) { struct sd_data *sd = GDrawGetUserData(GGadgetGetWindow(g)); char *str; running_script = true; str = GGadgetGetTitle8(GWidgetGetControl(sd->gw,CID_Script)); PyFF_ScriptString((FontViewBase *) sd->fv,sd->sc,sd->layer,str); free(str); running_script = false; } #endif #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) static void _SD_LangChanged(struct sd_data *sd) { GGadgetSetEnabled(GWidgetGetControl(sd->gw,CID_Call), !GGadgetIsChecked(GWidgetGetControl(sd->gw,CID_Python))); } static int SD_LangChanged(GGadget *g, GEvent *e) { if ( e->type==et_controlevent && e->u.control.subtype == et_radiochanged ) { struct sd_data *sd = GDrawGetUserData(GGadgetGetWindow(g)); _SD_LangChanged(sd); } return( true ); } #endif static int SD_OK(GGadget *g, GEvent *e) { if ( e->type==et_controlevent && e->u.control.subtype == et_buttonactivate ) { struct sd_data *sd = GDrawGetUserData(GGadgetGetWindow(g)); #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) if ( GGadgetIsChecked(GWidgetGetControl(GGadgetGetWindow(g),CID_Python)) ) ExecPython(g,e); else ExecNative(g,e); #elif !defined(_NO_PYTHON) ExecPython(g,e); #elif !defined(_NO_FFSCRIPT) ExecNative(g,e); #endif sd->done = true; } return( true ); } static void SD_DoCancel(struct sd_data *sd) { sd->done = true; } static int SD_Cancel(GGadget *g, GEvent *e) { if ( e->type==et_controlevent && e->u.control.subtype == et_buttonactivate ) { SD_DoCancel( GDrawGetUserData(GGadgetGetWindow(g))); } return( true ); } static int sd_e_h(GWindow gw, GEvent *event) { struct sd_data *sd = GDrawGetUserData(gw); if ( sd==NULL ) return( true ); if ( event->type==et_close ) { SD_DoCancel( sd ); } else if ( event->type==et_controlevent && event->u.control.subtype==et_textchanged ) { sd->fv->script_unsaved = !GTextFieldIsEmpty(GWidgetGetControl(sd->gw,CID_Script)); } else if ( event->type==et_controlevent && event->u.control.subtype==et_save ) { sd->fv->script_unsaved = false; } else if ( event->type==et_char ) { if ( event->u.chr.keysym == GK_F1 || event->u.chr.keysym == GK_Help ) { help("scripting.html"); return( true ); } return( false ); } else if ( event->type == et_map ) /* Above palettes */ GDrawRaise(gw); else if ( event->type == et_resize ) GDrawRequestExpose(gw,NULL,false); return( true ); } void ScriptDlg(FontView *fv,CharView *cv) { GRect pos; static GWindow gw; GWindowAttrs wattrs; GGadgetCreateData gcd[12], boxes[5], *barray[4][8], *hvarray[4][2]; #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) GGadgetCreateData *rarray[4]; #endif GTextInfo label[12]; struct sd_data sd; FontView *list; int i,l; memset(&sd,0,sizeof(sd)); sd.fv = fv; sd.sc = cv==NULL ? NULL : cv->b.sc; sd.layer = cv==NULL ? 
ly_fore : CVLayer((CharViewBase *) cv); sd.oldh = pos.height = GDrawPointsToPixels(NULL,SD_Height); if ( gw==NULL ) { memset(&wattrs,0,sizeof(wattrs)); wattrs.mask = wam_events|wam_cursor|wam_utf8_wtitle|wam_undercursor|wam_restrict|wam_isdlg; wattrs.event_masks = ~(1<<et_charup); wattrs.restrict_input_to_me = 1; wattrs.undercursor = 1; wattrs.cursor = ct_pointer; wattrs.utf8_window_title = _("Execute Script"); wattrs.is_dlg = true; pos.x = pos.y = 0; pos.width = GDrawPointsToPixels(NULL,GGadgetScale(SD_Width)); gw = GDrawCreateTopWindow(NULL,&pos,sd_e_h,&sd,&wattrs); memset(&boxes,0,sizeof(boxes)); memset(&gcd,0,sizeof(gcd)); memset(&label,0,sizeof(label)); i = l = 0; gcd[i].gd.pos.x = 10; gcd[i].gd.pos.y = 10; gcd[i].gd.pos.width = SD_Width-20; gcd[i].gd.pos.height = SD_Height-54; gcd[i].gd.flags = gg_visible | gg_enabled | gg_textarea_wrap; gcd[i].gd.cid = CID_Script; gcd[i++].creator = GTextAreaCreate; hvarray[l][0] = &gcd[i-1]; hvarray[l++][1] = NULL; #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) gcd[i-1].gd.pos.height -= 24; gcd[i].gd.pos.x = 10; gcd[i].gd.pos.y = gcd[i-1].gd.pos.y+gcd[i-1].gd.pos.height+1; gcd[i].gd.flags = gg_visible | gg_enabled | gg_cb_on; gcd[i].gd.cid = CID_Python; label[i].text = (unichar_t *) _("_Python"); label[i].text_is_1byte = true; label[i].text_in_resource = true; gcd[i].gd.label = &label[i]; gcd[i].gd.handle_controlevent = SD_LangChanged; gcd[i++].creator = GRadioCreate; rarray[0] = &gcd[i-1]; gcd[i].gd.pos.x = 70; gcd[i].gd.pos.y = gcd[i-1].gd.pos.y; gcd[i].gd.flags = gg_visible | gg_enabled; /* disabled if cv!=NULL later */ gcd[i].gd.cid = CID_FF; label[i].text = (unichar_t *) _("_FF"); label[i].text_is_1byte = true; label[i].text_in_resource = true; gcd[i].gd.label = &label[i]; gcd[i].gd.handle_controlevent = SD_LangChanged; gcd[i++].creator = GRadioCreate; rarray[1] = &gcd[i-1]; rarray[2] = GCD_Glue; rarray[3] = NULL; boxes[2].gd.flags = gg_enabled | gg_visible; boxes[2].gd.u.boxelements = rarray; boxes[2].creator = GHBoxCreate; hvarray[l][0] = &boxes[2]; hvarray[l++][1] = NULL; #endif barray[0][0] = barray[1][0] = barray[0][6] = barray[1][6] = GCD_Glue; barray[0][2] = barray[1][2] = barray[0][4] = barray[1][4] = GCD_Glue; barray[0][1] = barray[0][5] = GCD_RowSpan; barray[0][7] = barray[1][7] = barray[2][0] = NULL; gcd[i].gd.pos.x = 25-3; gcd[i].gd.pos.y = SD_Height-32-3; gcd[i].gd.flags = gg_visible | gg_enabled | gg_but_default; label[i].text = (unichar_t *) _("_OK"); label[i].text_is_1byte = true; label[i].text_in_resource = true; gcd[i].gd.mnemonic = 'O'; gcd[i].gd.label = &label[i]; gcd[i].gd.handle_controlevent = SD_OK; gcd[i].gd.cid = CID_OK; gcd[i++].creator = GButtonCreate; barray[1][1] = &gcd[i-1]; gcd[i].gd.pos.x = -25; gcd[i].gd.pos.y = SD_Height-32; gcd[i].gd.flags = gg_visible | gg_enabled | gg_but_cancel; label[i].text = (unichar_t *) _("_Cancel"); label[i].text_is_1byte = true; label[i].text_in_resource = true; gcd[i].gd.label = &label[i]; gcd[i].gd.mnemonic = 'C'; gcd[i].gd.handle_controlevent = SD_Cancel; gcd[i].gd.cid = CID_Cancel; gcd[i++].creator = GButtonCreate; barray[1][5] = &gcd[i-1]; gcd[i].gd.pos.x = (SD_Width-GIntGetResource(_NUM_Buttonsize)*100/GIntGetResource(_NUM_ScaleFactor))/2; gcd[i].gd.pos.y = SD_Height-40; gcd[i].gd.flags = gg_visible | gg_enabled; label[i].text = (unichar_t *) _("C_all..."); label[i].text_is_1byte = true; label[i].text_in_resource = true; gcd[i].gd.label = &label[i]; gcd[i].gd.mnemonic = 'a'; gcd[i].gd.handle_controlevent = SD_Call; gcd[i].gd.cid = CID_Call; gcd[i++].creator = 
GButtonCreate; barray[0][3] = &gcd[i-1]; #if !defined(_NO_FFSCRIPT) gcd[i].gd.pos.width = gcd[i].gd.pos.height = 5; gcd[i].gd.flags = gg_visible | gg_enabled; gcd[i++].creator = GSpacerCreate; barray[1][3] = &gcd[i-1]; #else barray[1][3] = GCD_RowSpan; #endif barray[3][0] = NULL; boxes[3].gd.flags = gg_enabled | gg_visible; boxes[3].gd.u.boxelements = barray[0]; boxes[3].creator = GHVBoxCreate; hvarray[l][0] = &boxes[3]; hvarray[l++][1] = NULL; hvarray[l][0] = NULL; boxes[0].gd.pos.x = boxes[0].gd.pos.y = 2; boxes[0].gd.flags = gg_enabled | gg_visible; boxes[0].gd.u.boxelements = hvarray[0]; boxes[0].creator = GHVGroupCreate; GGadgetsCreate(gw,boxes); if ( boxes[2].ret!=NULL ) GHVBoxSetExpandableCol(boxes[2].ret,gb_expandglue); GHVBoxSetExpandableCol(boxes[3].ret,gb_expandgluesame); GHVBoxSetExpandableRow(boxes[0].ret,0); GHVBoxFitWindow(boxes[0].ret); } #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) GGadgetSetEnabled(GWidgetGetControl(gw,CID_FF),cv==NULL); #endif sd.gw = gw; GDrawSetUserData(gw,&sd); GWidgetIndicateFocusGadget(GWidgetGetControl(gw,CID_Script)); #if !defined(_NO_FFSCRIPT) && !defined(_NO_PYTHON) _SD_LangChanged(&sd); #endif GDrawSetVisible(gw,true); while ( !sd.done ) GDrawProcessOneEvent(NULL); GDrawSetVisible(gw,false); /* Selection may be out of date, force a refresh */ for ( list = fv_list; list!=NULL; list=(FontView *) list->b.next ) GDrawRequestExpose(list->v,NULL,false); GDrawSync(NULL); GDrawProcessPendingEvents(NULL); GDrawSetUserData(gw,NULL); } #endif /* No scripting */
./CrossVul/dataset_final_sorted/CWE-119/c/good_1050_3
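/* Editorial note on the record above (good_1050_3): ExecNative() streams the
 * script to the temp file with putc(*ret,c.script) one unichar_t at a time,
 * truncating anything above 0xFF -- the source's own "There's a bug here"
 * comment flags exactly this. A hedged sketch of one way out, assuming
 * <stdio.h> is in scope and that FontForge's u2utf8_copy() from ustring.h
 * returns a malloc'd UTF-8 copy; this is an illustration, not the project's
 * actual fix: */
static void write_script_utf8(FILE *f, const unichar_t *script) {
    char *utf8 = u2utf8_copy(script);	/* convert the whole string to UTF-8 */
    if ( utf8!=NULL ) {
	fputs(utf8,f);			/* write bytes, no per-character truncation */
	free(utf8);
    }
}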
crossvul-cpp_data_bad_3163_0
/* radare - LGPL - Copyright 2016 - Oscar Salvador */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include <r_io.h> #include "bflt/bflt.h" static void *load_bytes(RBinFile *arch, const ut8 *buf, ut64 sz, ut64 loaddr, Sdb *sdb) { if (!buf || !sz || sz == UT64_MAX) { return NULL; } RBuffer *tbuf = r_buf_new (); r_buf_set_bytes (tbuf, buf, sz); struct r_bin_bflt_obj *res = r_bin_bflt_new_buf (tbuf); r_buf_free (tbuf); return res ? res : NULL; } static int load(RBinFile *arch) { const ut8 *bytes = r_buf_buffer (arch->buf); ut64 sz = r_buf_size (arch->buf); arch->o->bin_obj = load_bytes (arch, bytes, sz, arch->o->loadaddr, arch->sdb); return arch->o->bin_obj ? true : false; } static RList *entries(RBinFile *arch) { struct r_bin_bflt_obj *obj = (struct r_bin_bflt_obj*)arch->o->bin_obj; RList *ret; RBinAddr *ptr; if (!(ret = r_list_newf (free))) { return NULL; } ptr = r_bflt_get_entry (obj); if (!ptr) { return NULL; } r_list_append (ret, ptr); return ret; } static void __patch_reloc(RBuffer *buf, ut32 addr_to_patch, ut32 data_offset) { ut8 val[4] = { 0 }; r_write_le32 (val, data_offset); r_buf_write_at (buf, addr_to_patch, (void *)val, sizeof (val)); } static int search_old_relocation(struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) { int i; for (i = 0; i < n_reloc; i++) { if (addr_to_patch == reloc_table[i].data_offset) { return i; } } return -1; } static RList *patch_relocs(RBin *b) { struct r_bin_bflt_obj *bin = NULL; RList *list = NULL; RBinObject *obj; int i = 0; if (!b || !b->iob.io || !b->iob.io->desc) { return NULL; } if (!b->iob.io->cached) { eprintf ( "Warning: please run r2 with -e io.cache=true to patch " "relocations\n"); return list; } obj = r_bin_cur_object (b); if (!obj) { return NULL; } bin = obj->bin_obj; list = r_list_newf ((RListFree)free); if (!list) { return NULL; } if (bin->got_table) { struct reloc_struct_t *got_table = bin->got_table; for (i = 0; i < bin->n_got; i++) { __patch_reloc (bin->b, got_table[i].addr_to_patch, got_table[i].data_offset); RBinReloc *reloc = R_NEW0 (RBinReloc); if (reloc) { reloc->type = R_BIN_RELOC_32; reloc->paddr = got_table[i].addr_to_patch; reloc->vaddr = reloc->paddr; r_list_append (list, reloc); } } R_FREE (bin->got_table); } if (bin->reloc_table) { struct reloc_struct_t *reloc_table = bin->reloc_table; for (i = 0; i < bin->hdr->reloc_count; i++) { int found = search_old_relocation (reloc_table, reloc_table[i].addr_to_patch, bin->hdr->reloc_count); if (found != -1) { __patch_reloc (bin->b, reloc_table[found].addr_to_patch, reloc_table[i].data_offset); } else { __patch_reloc (bin->b, reloc_table[i].addr_to_patch, reloc_table[i].data_offset); } RBinReloc *reloc = R_NEW0 (RBinReloc); if (reloc) { reloc->type = R_BIN_RELOC_32; reloc->paddr = reloc_table[i].addr_to_patch; reloc->vaddr = reloc->paddr; r_list_append (list, reloc); } } R_FREE (bin->reloc_table); } b->iob.write_at (b->iob.io, bin->b->base, bin->b->buf, bin->b->length); return list; } static int get_ngot_entries(struct r_bin_bflt_obj *obj) { ut32 data_size = obj->hdr->data_end - obj->hdr->data_start; int i = 0, n_got = 0; if (data_size > obj->size) { return 0; } for (i = 0, n_got = 0; i < data_size ; i+= 4, n_got++) { ut32 entry, offset = obj->hdr->data_start; if (offset + i + sizeof (ut32) > obj->size || offset + i + sizeof (ut32) < offset) { return 0; } int len = r_buf_read_at (obj->b, offset + i, (ut8 *)&entry, sizeof (ut32)); if (len != sizeof (ut32)) { return 0; } if (!VALID_GOT_ENTRY (entry)) { break; } } return n_got; } 
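/* Editorial sketch: get_ngot_entries() above guards every read with
 * "offset + i + sizeof (ut32) > obj->size" plus a wraparound test. A
 * hypothetical helper that names that idiom (illustration only -- the plugin
 * itself open-codes these checks): */
static inline int fits_in_obj(ut32 off, ut32 want, ut64 size) {
	ut32 end = off + want;
	/* end < off means the ut32 addition wrapped; end > size means overrun */
	return end >= off && end <= size;
}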
static RList *relocs(RBinFile *arch) { struct r_bin_bflt_obj *obj = (struct r_bin_bflt_obj*)arch->o->bin_obj; RList *list = r_list_newf ((RListFree)free); int i, len, n_got, amount; if (!list || !obj) { r_list_free (list); return NULL; } if (obj->hdr->flags & FLAT_FLAG_GOTPIC) { n_got = get_ngot_entries (obj); if (n_got) { amount = n_got * sizeof (ut32); if (amount < n_got || amount > UT32_MAX) { goto out_error; } struct reloc_struct_t *got_table = calloc (1, n_got * sizeof (ut32)); if (got_table) { ut32 offset = 0; for (i = 0; i < n_got ; offset += 4, i++) { ut32 got_entry; if (obj->hdr->data_start + offset + 4 > obj->size || obj->hdr->data_start + offset + 4 < offset) { break; } len = r_buf_read_at (obj->b, obj->hdr->data_start + offset, (ut8 *)&got_entry, sizeof (ut32)); if (!VALID_GOT_ENTRY (got_entry) || len != sizeof (ut32)) { break; } got_table[i].addr_to_patch = got_entry; got_table[i].data_offset = got_entry + BFLT_HDR_SIZE; } obj->n_got = n_got; obj->got_table = got_table; } } } if (obj->hdr->reloc_count > 0) { int n_reloc = obj->hdr->reloc_count; amount = n_reloc * sizeof (struct reloc_struct_t); if (amount < n_reloc || amount > UT32_MAX) { goto out_error; } struct reloc_struct_t *reloc_table = calloc (1, amount + 1); if (!reloc_table) { goto out_error; } amount = n_reloc * sizeof (ut32); if (amount < n_reloc || amount > UT32_MAX) { free (reloc_table); goto out_error; } ut32 *reloc_pointer_table = calloc (1, amount + 1); if (!reloc_pointer_table) { free (reloc_table); goto out_error; } if (obj->hdr->reloc_start + amount > obj->size || obj->hdr->reloc_start + amount < amount) { free (reloc_table); free (reloc_pointer_table); goto out_error; } len = r_buf_read_at (obj->b, obj->hdr->reloc_start, (ut8 *)reloc_pointer_table, amount); if (len != amount) { free (reloc_table); free (reloc_pointer_table); goto out_error; } for (i = 0; i < obj->hdr->reloc_count; i++) { //XXX it doesn't take endianness into consideration when swapping ut32 reloc_offset = r_swap_ut32 (reloc_pointer_table[i]) + BFLT_HDR_SIZE; if (reloc_offset < obj->hdr->bss_end && reloc_offset < obj->size) { ut32 reloc_fixed, reloc_data_offset; if (reloc_offset + sizeof (ut32) > obj->size || reloc_offset + sizeof (ut32) < reloc_offset) { free (reloc_table); free (reloc_pointer_table); goto out_error; } len = r_buf_read_at (obj->b, reloc_offset, (ut8 *)&reloc_fixed, sizeof (ut32)); if (len != sizeof (ut32)) { eprintf ("problem while reading relocation entries\n"); free (reloc_table); free (reloc_pointer_table); goto out_error; } reloc_data_offset = r_swap_ut32 (reloc_fixed) + BFLT_HDR_SIZE; reloc_table[i].addr_to_patch = reloc_offset; reloc_table[i].data_offset = reloc_data_offset; RBinReloc *reloc = R_NEW0 (RBinReloc); if (reloc) { reloc->type = R_BIN_RELOC_32; reloc->paddr = reloc_table[i].addr_to_patch; reloc->vaddr = reloc->paddr; r_list_append (list, reloc); } } } free (reloc_pointer_table); obj->reloc_table = reloc_table; } return list; out_error: r_list_free (list); return NULL; } static RBinInfo *info(RBinFile *arch) { struct r_bin_bflt_obj *obj = NULL; RBinInfo *info = NULL; if (!arch || !arch->o || !arch->o->bin_obj) { return NULL; } obj = (struct r_bin_bflt_obj*)arch->o->bin_obj; if (!(info = R_NEW0 (RBinInfo))) { return NULL; } info->file = arch->file ?
strdup (arch->file) : NULL; info->rclass = strdup ("bflt"); info->bclass = strdup ("bflt"); info->type = strdup ("bFLT (Executable file)"); info->os = strdup ("Linux"); info->subsystem = strdup ("Linux"); info->arch = strdup ("arm"); info->big_endian = obj->endian; info->bits = 32; info->has_va = false; info->dbg_info = 0; info->machine = strdup ("unknown"); return info; } static int check_bytes(const ut8 *buf, ut64 length) { return length > 4 && !memcmp (buf, "bFLT", 4); } static int check(RBinFile *arch) { const ut8 *bytes = arch ? r_buf_buffer (arch->buf) : NULL; ut64 sz = arch ? r_buf_size (arch->buf) : 0; if (!bytes || !sz) { return false; } return check_bytes (bytes, sz); } static int destroy(RBinFile *arch) { r_bin_bflt_free ((struct r_bin_bflt_obj*)arch->o->bin_obj); return true; } RBinPlugin r_bin_plugin_bflt = { .name = "bflt", .desc = "bFLT format r_bin plugin", .license = "LGPL3", .load = &load, .load_bytes = &load_bytes, .destroy = &destroy, .check = &check, .check_bytes = &check_bytes, .entries = &entries, .info = &info, .relocs = &relocs, .patch_relocs = &patch_relocs, }; #ifndef CORELIB RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_bflt, .version = R2_VERSION }; #endif
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3163_0
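/* Editorial note on the record above (bad_3163_0): relocs() computes
 * "amount = n_reloc * sizeof (struct reloc_struct_t)" into a signed int, so
 * the "amount > UT32_MAX" arm cannot fire where int is 32 bits wide and
 * "amount < n_reloc" misses many wraparounds -- and the GOT branch then
 * allocates n_got * sizeof (ut32) bytes for an array of two-field structs.
 * A minimal sketch of a checked multiply, purely as an illustration of the
 * idiom: */
static int mul_ut32_checked(ut32 a, ut32 b, ut32 *out) {
	if (b != 0 && a > UT32_MAX / b) {
		return 0;	/* product would not fit in 32 bits */
	}
	*out = a * b;
	return 1;
}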
crossvul-cpp_data_good_343_6
/* * pkcs15-sc-hsm.c : Initialize PKCS#15 emulation * * Copyright (C) 2012 Andreas Schwier, CardContact, Minden, Germany * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #include "asn1.h" #include "common/compat_strlcpy.h" #include "common/compat_strnlen.h" #include "card-sc-hsm.h" extern struct sc_aid sc_hsm_aid; void sc_hsm_set_serialnr(sc_card_t *card, char *serial); static struct ec_curve curves[] = { { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x01", 8}, // secp192r1 aka prime192r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 24}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 24}, { (unsigned char *) "\x64\x21\x05\x19\xE5\x9C\x80\xE7\x0F\xA7\xE9\xAB\x72\x24\x30\x49\xFE\xB8\xDE\xEC\xC1\x46\xB9\xB1", 24}, { (unsigned char *) "\x04\x18\x8D\xA8\x0E\xB0\x30\x90\xF6\x7C\xBF\x20\xEB\x43\xA1\x88\x00\xF4\xFF\x0A\xFD\x82\xFF\x10\x12\x07\x19\x2B\x95\xFF\xC8\xDA\x78\x63\x10\x11\xED\x6B\x24\xCD\xD5\x73\xF9\x77\xA1\x1E\x79\x48\x11", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x99\xDE\xF8\x36\x14\x6B\xC9\xB1\xB4\xD2\x28\x31", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2A\x86\x48\xCE\x3D\x03\x01\x07", 8}, // secp256r1 aka prime256r1 { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 32}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC", 32}, { (unsigned char *) "\x5A\xC6\x35\xD8\xAA\x3A\x93\xE7\xB3\xEB\xBD\x55\x76\x98\x86\xBC\x65\x1D\x06\xB0\xCC\x53\xB0\xF6\x3B\xCE\x3C\x3E\x27\xD2\x60\x4B", 32}, { (unsigned char *) "\x04\x6B\x17\xD1\xF2\xE1\x2C\x42\x47\xF8\xBC\xE6\xE5\x63\xA4\x40\xF2\x77\x03\x7D\x81\x2D\xEB\x33\xA0\xF4\xA1\x39\x45\xD8\x98\xC2\x96\x4F\xE3\x42\xE2\xFE\x1A\x7F\x9B\x8E\xE7\xEB\x4A\x7C\x0F\x9E\x16\x2B\xCE\x33\x57\x6B\x31\x5E\xCE\xCB\xB6\x40\x68\x37\xBF\x51\xF5", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xBC\xE6\xFA\xAD\xA7\x17\x9E\x84\xF3\xB9\xCA\xC2\xFC\x63\x25\x51", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x03", 9}, // brainpoolP192r1 { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x30\x93\xD1\x8D\xB7\x8F\xCE\x47\x6D\xE1\xA8\x62\x97", 24}, { (unsigned char *) "\x6A\x91\x17\x40\x76\xB1\xE0\xE1\x9C\x39\xC0\x31\xFE\x86\x85\xC1\xCA\xE0\x40\xE5\xC6\x9A\x28\xEF", 24}, { (unsigned char *) 
"\x46\x9A\x28\xEF\x7C\x28\xCC\xA3\xDC\x72\x1D\x04\x4F\x44\x96\xBC\xCA\x7E\xF4\x14\x6F\xBF\x25\xC9", 24}, { (unsigned char *) "\x04\xC0\xA0\x64\x7E\xAA\xB6\xA4\x87\x53\xB0\x33\xC5\x6C\xB0\xF0\x90\x0A\x2F\x5C\x48\x53\x37\x5F\xD6\x14\xB6\x90\x86\x6A\xBD\x5B\xB8\x8B\x5F\x48\x28\xC1\x49\x00\x02\xE6\x77\x3F\xA2\xFA\x29\x9B\x8F", 49}, { (unsigned char *) "\xC3\x02\xF4\x1D\x93\x2A\x36\xCD\xA7\xA3\x46\x2F\x9E\x9E\x91\x6B\x5B\xE8\xF1\x02\x9A\xC4\xAC\xC1", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x05", 9}, // brainpoolP224r1 { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD1\xD7\x87\xB0\x9F\x07\x57\x97\xDA\x89\xF5\x7E\xC8\xC0\xFF", 28}, { (unsigned char *) "\x68\xA5\xE6\x2C\xA9\xCE\x6C\x1C\x29\x98\x03\xA6\xC1\x53\x0B\x51\x4E\x18\x2A\xD8\xB0\x04\x2A\x59\xCA\xD2\x9F\x43", 28}, { (unsigned char *) "\x25\x80\xF6\x3C\xCF\xE4\x41\x38\x87\x07\x13\xB1\xA9\x23\x69\xE3\x3E\x21\x35\xD2\x66\xDB\xB3\x72\x38\x6C\x40\x0B", 28}, { (unsigned char *) "\x04\x0D\x90\x29\xAD\x2C\x7E\x5C\xF4\x34\x08\x23\xB2\xA8\x7D\xC6\x8C\x9E\x4C\xE3\x17\x4C\x1E\x6E\xFD\xEE\x12\xC0\x7D\x58\xAA\x56\xF7\x72\xC0\x72\x6F\x24\xC6\xB8\x9E\x4E\xCD\xAC\x24\x35\x4B\x9E\x99\xCA\xA3\xF6\xD3\x76\x14\x02\xCD", 57}, { (unsigned char *) "\xD7\xC1\x34\xAA\x26\x43\x66\x86\x2A\x18\x30\x25\x75\xD0\xFB\x98\xD1\x16\xBC\x4B\x6D\xDE\xBC\xA3\xA5\xA7\x93\x9F", 28}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x07", 9}, // brainpoolP256r1 { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x72\x6E\x3B\xF6\x23\xD5\x26\x20\x28\x20\x13\x48\x1D\x1F\x6E\x53\x77", 32}, { (unsigned char *) "\x7D\x5A\x09\x75\xFC\x2C\x30\x57\xEE\xF6\x75\x30\x41\x7A\xFF\xE7\xFB\x80\x55\xC1\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9", 32}, { (unsigned char *) "\x26\xDC\x5C\x6C\xE9\x4A\x4B\x44\xF3\x30\xB5\xD9\xBB\xD7\x7C\xBF\x95\x84\x16\x29\x5C\xF7\xE1\xCE\x6B\xCC\xDC\x18\xFF\x8C\x07\xB6", 32}, { (unsigned char *) "\x04\x8B\xD2\xAE\xB9\xCB\x7E\x57\xCB\x2C\x4B\x48\x2F\xFC\x81\xB7\xAF\xB9\xDE\x27\xE1\xE3\xBD\x23\xC2\x3A\x44\x53\xBD\x9A\xCE\x32\x62\x54\x7E\xF8\x35\xC3\xDA\xC4\xFD\x97\xF8\x46\x1A\x14\x61\x1D\xC9\xC2\x77\x45\x13\x2D\xED\x8E\x54\x5C\x1D\x54\xC7\x2F\x04\x69\x97", 65}, { (unsigned char *) "\xA9\xFB\x57\xDB\xA1\xEE\xA9\xBC\x3E\x66\x0A\x90\x9D\x83\x8D\x71\x8C\x39\x7A\xA3\xB5\x61\xA6\xF7\x90\x1E\x0E\x82\x97\x48\x56\xA7", 32}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x24\x03\x03\x02\x08\x01\x01\x09", 9}, // brainpoolP320r1 { (unsigned char *) "\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA6\xF6\xF4\x0D\xEF\x4F\x92\xB9\xEC\x78\x93\xEC\x28\xFC\xD4\x12\xB1\xF1\xB3\x2E\x27", 40}, { (unsigned char *) "\x3E\xE3\x0B\x56\x8F\xBA\xB0\xF8\x83\xCC\xEB\xD4\x6D\x3F\x3B\xB8\xA2\xA7\x35\x13\xF5\xEB\x79\xDA\x66\x19\x0E\xB0\x85\xFF\xA9\xF4\x92\xF3\x75\xA9\x7D\x86\x0E\xB4", 40}, { (unsigned char *) "\x52\x08\x83\x94\x9D\xFD\xBC\x42\xD3\xAD\x19\x86\x40\x68\x8A\x6F\xE1\x3F\x41\x34\x95\x54\xB4\x9A\xCC\x31\xDC\xCD\x88\x45\x39\x81\x6F\x5E\xB4\xAC\x8F\xB1\xF1\xA6", 40}, { (unsigned char *) "\x04\x43\xBD\x7E\x9A\xFB\x53\xD8\xB8\x52\x89\xBC\xC4\x8E\xE5\xBF\xE6\xF2\x01\x37\xD1\x0A\x08\x7E\xB6\xE7\x87\x1E\x2A\x10\xA5\x99\xC7\x10\xAF\x8D\x0D\x39\xE2\x06\x11\x14\xFD\xD0\x55\x45\xEC\x1C\xC8\xAB\x40\x93\x24\x7F\x77\x27\x5E\x07\x43\xFF\xED\x11\x71\x82\xEA\xA9\xC7\x78\x77\xAA\xAC\x6A\xC7\xD3\x52\x45\xD1\x69\x2E\x8E\xE1", 81}, { (unsigned char *) 
"\xD3\x5E\x47\x20\x36\xBC\x4F\xB7\xE1\x3C\x78\x5E\xD2\x01\xE0\x65\xF9\x8F\xCF\xA5\xB6\x8F\x12\xA3\x2D\x48\x2E\xC7\xEE\x86\x58\xE9\x86\x91\x55\x5B\x44\xC5\x93\x11", 40}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x1F", 5}, // secp192k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xEE\x37", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 24}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03", 24}, { (unsigned char *) "\x04\xDB\x4F\xF1\x0E\xC0\x57\xE9\xAE\x26\xB0\x7D\x02\x80\xB7\xF4\x34\x1D\xA5\xD1\xB1\xEA\xE0\x6C\x7D\x9B\x2F\x2F\x6D\x9C\x56\x28\xA7\x84\x41\x63\xD0\x15\xBE\x86\x34\x40\x82\xAA\x88\xD9\x5E\x2F\x9D", 49}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\x26\xF2\xFC\x17\x0F\x69\x46\x6A\x74\xDE\xFD\x8D", 24}, { (unsigned char *) "\x01", 1} }, { { (unsigned char *) "\x2B\x81\x04\x00\x0A", 5}, // secp256k1 { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xFF\xFF\xFC\x2F", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 32}, { (unsigned char *) "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07", 32}, { (unsigned char *) "\x04\x79\xBE\x66\x7E\xF9\xDC\xBB\xAC\x55\xA0\x62\x95\xCE\x87\x0B\x07\x02\x9B\xFC\xDB\x2D\xCE\x28\xD9\x59\xF2\x81\x5B\x16\xF8\x17\x98\x48\x3A\xDA\x77\x26\xA3\xC4\x65\x5D\xA4\xFB\xFC\x0E\x11\x08\xA8\xFD\x17\xB4\x48\xA6\x85\x54\x19\x9C\x47\xD0\x8F\xFB\x10\xD4\xB8", 65}, { (unsigned char *) "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE\xBA\xAE\xDC\xE6\xAF\x48\xA0\x3B\xBF\xD2\x5E\x8C\xD0\x36\x41\x41", 32}, { (unsigned char *) "\x01", 1} }, { { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0}, { NULL, 0} } }; #define C_ASN1_CVC_PUBKEY_SIZE 10 static const struct sc_asn1_entry c_asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE] = { { "publicKeyOID", SC_ASN1_OBJECT, SC_ASN1_UNI | SC_ASN1_OBJECT, 0, NULL, NULL }, { "primeOrModulus", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 1, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientAorExponent", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 2, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "coefficientB", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 3, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "basePointG", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 4, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "order", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 5, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "publicPoint", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 6, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "cofactor", SC_ASN1_OCTET_STRING, SC_ASN1_CTX | 7, SC_ASN1_OPTIONAL | SC_ASN1_ALLOC, NULL, NULL }, { "modulusSize", SC_ASN1_INTEGER, SC_ASN1_UNI | SC_ASN1_INTEGER, SC_ASN1_OPTIONAL, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_BODY_SIZE 5 static const struct sc_asn1_entry c_asn1_cvc_body[C_ASN1_CVC_BODY_SIZE] = { { "certificateProfileIdentifier", SC_ASN1_INTEGER, SC_ASN1_APP | 0x1F29, 0, NULL, NULL }, { "certificationAuthorityReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "publicKey", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F49, 0, 
NULL, NULL }, { "certificateHolderReference", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 0x1F20, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVCERT_SIZE 3 static const struct sc_asn1_entry c_asn1_cvcert[C_ASN1_CVCERT_SIZE] = { { "certificateBody", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F4E, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_CVC_SIZE 2 static const struct sc_asn1_entry c_asn1_cvc[C_ASN1_CVC_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_AUTHREQ_SIZE 4 static const struct sc_asn1_entry c_asn1_authreq[C_ASN1_AUTHREQ_SIZE] = { { "certificate", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 0x1F21, 0, NULL, NULL }, { "outerCAR", SC_ASN1_PRINTABLESTRING, SC_ASN1_APP | 2, 0, NULL, NULL }, { "signature", SC_ASN1_OCTET_STRING, SC_ASN1_APP | 0x1F37, SC_ASN1_ALLOC, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; #define C_ASN1_REQ_SIZE 2 static const struct sc_asn1_entry c_asn1_req[C_ASN1_REQ_SIZE] = { { "authenticatedrequest", SC_ASN1_STRUCT, SC_ASN1_CONS | SC_ASN1_APP | 7, 0, NULL, NULL }, { NULL, 0, 0, 0, NULL, NULL } }; static int read_file(sc_pkcs15_card_t * p15card, u8 fid[2], u8 *efbin, size_t *len, int optional) { sc_path_t path; int r; sc_path_set(&path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); /* look this up with our AID */ path.aid = sc_hsm_aid; /* we don't have a pre-known size of the file */ path.count = -1; if (!p15card->opts.use_file_cache || !efbin || SC_SUCCESS != sc_pkcs15_read_cached_file(p15card, &path, &efbin, len)) { /* avoid re-selection of SC-HSM */ path.aid.len = 0; r = sc_select_file(p15card->card, &path, NULL); if (r < 0) { sc_log(p15card->card->ctx, "Could not select EF"); } else { r = sc_read_binary(p15card->card, 0, efbin, *len, 0); } if (r < 0) { sc_log(p15card->card->ctx, "Could not read EF"); if (!optional) { return r; } /* optional files are saved as empty files to avoid card * transactions. Parsing the file's data will reveal that they were * missing. */ *len = 0; } else { *len = r; } if (p15card->opts.use_file_cache) { /* save this with our AID */ path.aid = sc_hsm_aid; sc_pkcs15_cache_file(p15card, &path, efbin, *len); } } return SC_SUCCESS; } /* * Decode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_decode_cvc(sc_pkcs15_card_t * p15card, const u8 ** buf, size_t *buflen, sc_cvc_t *cvc) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_req[C_ASN1_REQ_SIZE]; struct sc_asn1_entry asn1_authreq[C_ASN1_AUTHREQ_SIZE]; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; unsigned int cla,tag; size_t taglen; size_t lenchr = sizeof(cvc->chr); size_t lencar = sizeof(cvc->car); size_t lenoutercar = sizeof(cvc->outer_car); const u8 *tbuf; int r; memset(cvc, 0, sizeof(*cvc)); sc_copy_asn1_entry(c_asn1_req, asn1_req); sc_copy_asn1_entry(c_asn1_authreq, asn1_authreq); sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 1, &cvc->primeOrModulus, &cvc->primeOrModuluslen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 2, &cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 3, &cvc->coefficientB, &cvc->coefficientBlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 4, &cvc->basePointG, &cvc->basePointGlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 5, &cvc->order, &cvc->orderlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 6, &cvc->publicPoint, &cvc->publicPointlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 7, &cvc->cofactor, &cvc->cofactorlen, 0); sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 0); sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 0); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 0); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 0); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 0); sc_format_asn1_entry(asn1_cvcert + 1, &cvc->signature, &cvc->signatureLen, 0); sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq , &asn1_cvcert, NULL, 0); sc_format_asn1_entry(asn1_authreq + 1, &cvc->outer_car, &lenoutercar, 0); sc_format_asn1_entry(asn1_authreq + 2, &cvc->outerSignature, &cvc->outerSignatureLen, 0); sc_format_asn1_entry(asn1_req , &asn1_authreq, NULL, 0); /* sc_asn1_print_tags(*buf, *buflen); */ tbuf = *buf; r = sc_asn1_read_tag(&tbuf, *buflen, &cla, &tag, &taglen); LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); /* Determine if we deal with an authenticated request, plain request or certificate */ if ((cla == (SC_ASN1_TAG_APPLICATION|SC_ASN1_TAG_CONSTRUCTED)) && (tag == 7)) { r = sc_asn1_decode(card->ctx, asn1_req, *buf, *buflen, buf, buflen); } else { r = sc_asn1_decode(card->ctx, asn1_cvc, *buf, *buflen, buf, buflen); } LOG_TEST_RET(card->ctx, r, "Could not decode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Encode a card verifiable certificate as defined in TR-03110. 
*/ int sc_pkcs15emu_sc_hsm_encode_cvc(sc_pkcs15_card_t * p15card, sc_cvc_t *cvc, u8 ** buf, size_t *buflen) { sc_card_t *card = p15card->card; struct sc_asn1_entry asn1_cvc[C_ASN1_CVC_SIZE]; struct sc_asn1_entry asn1_cvcert[C_ASN1_CVCERT_SIZE]; struct sc_asn1_entry asn1_cvc_body[C_ASN1_CVC_BODY_SIZE]; struct sc_asn1_entry asn1_cvc_pubkey[C_ASN1_CVC_PUBKEY_SIZE]; size_t lenchr; size_t lencar; int r; sc_copy_asn1_entry(c_asn1_cvc, asn1_cvc); sc_copy_asn1_entry(c_asn1_cvcert, asn1_cvcert); sc_copy_asn1_entry(c_asn1_cvc_body, asn1_cvc_body); sc_copy_asn1_entry(c_asn1_cvc_pubkey, asn1_cvc_pubkey); asn1_cvc_pubkey[1].flags = SC_ASN1_OPTIONAL; asn1_cvcert[1].flags = SC_ASN1_OPTIONAL; sc_format_asn1_entry(asn1_cvc_pubkey , &cvc->pukoid, NULL, 1); if (cvc->primeOrModulus && (cvc->primeOrModuluslen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 1, cvc->primeOrModulus, &cvc->primeOrModuluslen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 2, cvc->coefficientAorExponent, &cvc->coefficientAorExponentlen, 1); if (cvc->coefficientB && (cvc->coefficientBlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 3, cvc->coefficientB, &cvc->coefficientBlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 4, cvc->basePointG, &cvc->basePointGlen, 1); sc_format_asn1_entry(asn1_cvc_pubkey + 5, cvc->order, &cvc->orderlen, 1); if (cvc->publicPoint && (cvc->publicPointlen > 0)) { sc_format_asn1_entry(asn1_cvc_pubkey + 6, cvc->publicPoint, &cvc->publicPointlen, 1); } sc_format_asn1_entry(asn1_cvc_pubkey + 7, cvc->cofactor, &cvc->cofactorlen, 1); } if (cvc->modulusSize > 0) { sc_format_asn1_entry(asn1_cvc_pubkey + 8, &cvc->modulusSize, NULL, 1); } sc_format_asn1_entry(asn1_cvc_body , &cvc->cpi, NULL, 1); lencar = strnlen(cvc->car, sizeof cvc->car); sc_format_asn1_entry(asn1_cvc_body + 1, &cvc->car, &lencar, 1); sc_format_asn1_entry(asn1_cvc_body + 2, &asn1_cvc_pubkey, NULL, 1); lenchr = strnlen(cvc->chr, sizeof cvc->chr); sc_format_asn1_entry(asn1_cvc_body + 3, &cvc->chr, &lenchr, 1); sc_format_asn1_entry(asn1_cvcert , &asn1_cvc_body, NULL, 1); if (cvc->signature && (cvc->signatureLen > 0)) { sc_format_asn1_entry(asn1_cvcert + 1, cvc->signature, &cvc->signatureLen, 1); } sc_format_asn1_entry(asn1_cvc , &asn1_cvcert, NULL, 1); r = sc_asn1_encode(card->ctx, asn1_cvc, buf, buflen); LOG_TEST_RET(card->ctx, r, "Could not encode card verifiable certificate"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_get_curve(struct ec_curve **curve, u8 *oid, size_t oidlen) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].oid.len == oidlen) && !memcmp(curves[i].oid.value, oid, oidlen)) { *curve = &curves[i]; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } int sc_pkcs15emu_sc_hsm_get_curve_oid(sc_cvc_t *cvc, const struct sc_lv_data **oid) { int i; for (i = 0; curves[i].oid.value; i++) { if ((curves[i].prime.len == cvc->primeOrModuluslen) && !memcmp(curves[i].prime.value, cvc->primeOrModulus, cvc->primeOrModuluslen)) { *oid = &curves[i].oid; return SC_SUCCESS; } } return SC_ERROR_INVALID_DATA; } static int sc_pkcs15emu_sc_hsm_get_rsa_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { pubkey->algorithm = SC_ALGORITHM_RSA; pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) return SC_ERROR_OUT_OF_MEMORY; pubkey->alg_id->algorithm = SC_ALGORITHM_RSA; pubkey->u.rsa.modulus.len = cvc->primeOrModuluslen; pubkey->u.rsa.modulus.data = malloc(pubkey->u.rsa.modulus.len); pubkey->u.rsa.exponent.len = cvc->coefficientAorExponentlen; 
pubkey->u.rsa.exponent.data = malloc(pubkey->u.rsa.exponent.len); if (!pubkey->u.rsa.modulus.data || !pubkey->u.rsa.exponent.data) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.rsa.exponent.data, cvc->coefficientAorExponent, pubkey->u.rsa.exponent.len); memcpy(pubkey->u.rsa.modulus.data, cvc->primeOrModulus, pubkey->u.rsa.modulus.len); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_get_ec_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { struct sc_ec_parameters *ecp; const struct sc_lv_data *oid; int r; pubkey->algorithm = SC_ALGORITHM_EC; r = sc_pkcs15emu_sc_hsm_get_curve_oid(cvc, &oid); if (r != SC_SUCCESS) return r; ecp = calloc(1, sizeof(struct sc_ec_parameters)); if (!ecp) return SC_ERROR_OUT_OF_MEMORY; ecp->der.len = oid->len + 2; ecp->der.value = calloc(ecp->der.len, 1); if (!ecp->der.value) { free(ecp); return SC_ERROR_OUT_OF_MEMORY; } *(ecp->der.value + 0) = 0x06; *(ecp->der.value + 1) = (u8)oid->len; memcpy(ecp->der.value + 2, oid->value, oid->len); ecp->type = 1; // Named curve pubkey->alg_id = (struct sc_algorithm_id *)calloc(1, sizeof(struct sc_algorithm_id)); if (!pubkey->alg_id) { free(ecp->der.value); free(ecp); return SC_ERROR_OUT_OF_MEMORY; } pubkey->alg_id->algorithm = SC_ALGORITHM_EC; pubkey->alg_id->params = ecp; pubkey->u.ec.ecpointQ.value = malloc(cvc->publicPointlen); if (!pubkey->u.ec.ecpointQ.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.ecpointQ.value, cvc->publicPoint, cvc->publicPointlen); pubkey->u.ec.ecpointQ.len = cvc->publicPointlen; pubkey->u.ec.params.der.value = malloc(ecp->der.len); if (!pubkey->u.ec.params.der.value) return SC_ERROR_OUT_OF_MEMORY; memcpy(pubkey->u.ec.params.der.value, ecp->der.value, ecp->der.len); pubkey->u.ec.params.der.len = ecp->der.len; /* FIXME: check return value? 
*/ sc_pkcs15_fix_ec_parameters(ctx, &pubkey->u.ec.params); return SC_SUCCESS; } int sc_pkcs15emu_sc_hsm_get_public_key(struct sc_context *ctx, sc_cvc_t *cvc, struct sc_pkcs15_pubkey *pubkey) { if (cvc->publicPoint && cvc->publicPointlen) { return sc_pkcs15emu_sc_hsm_get_ec_public_key(ctx, cvc, pubkey); } else { return sc_pkcs15emu_sc_hsm_get_rsa_public_key(ctx, cvc, pubkey); } } void sc_pkcs15emu_sc_hsm_free_cvc(sc_cvc_t *cvc) { if (cvc->signature) { free(cvc->signature); cvc->signature = NULL; } if (cvc->primeOrModulus) { free(cvc->primeOrModulus); cvc->primeOrModulus = NULL; } if (cvc->coefficientAorExponent) { free(cvc->coefficientAorExponent); cvc->coefficientAorExponent = NULL; } if (cvc->coefficientB) { free(cvc->coefficientB); cvc->coefficientB = NULL; } if (cvc->basePointG) { free(cvc->basePointG); cvc->basePointG = NULL; } if (cvc->order) { free(cvc->order); cvc->order = NULL; } if (cvc->publicPoint) { free(cvc->publicPoint); cvc->publicPoint = NULL; } if (cvc->cofactor) { free(cvc->cofactor); cvc->cofactor = NULL; } } static int sc_pkcs15emu_sc_hsm_add_pubkey(sc_pkcs15_card_t *p15card, u8 *efbin, size_t len, sc_pkcs15_prkey_info_t *key_info, char *label) { struct sc_context *ctx = p15card->card->ctx; sc_card_t *card = p15card->card; sc_pkcs15_pubkey_info_t pubkey_info; sc_pkcs15_object_t pubkey_obj; struct sc_pkcs15_pubkey pubkey; sc_cvc_t cvc; u8 *cvcpo; int r; cvcpo = efbin; memset(&cvc, 0, sizeof(cvc)); r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&cvcpo, &len, &cvc); LOG_TEST_RET(ctx, r, "Could not decode certificate signing request"); memset(&pubkey, 0, sizeof(pubkey)); r = sc_pkcs15emu_sc_hsm_get_public_key(ctx, &cvc, &pubkey); LOG_TEST_RET(card->ctx, r, "Could not extract public key"); memset(&pubkey_info, 0, sizeof(pubkey_info)); memset(&pubkey_obj, 0, sizeof(pubkey_obj)); r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_obj.content.value, &pubkey_obj.content.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); r = sc_pkcs15_encode_pubkey(ctx, &pubkey, &pubkey_info.direct.raw.value, &pubkey_info.direct.raw.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); r = sc_pkcs15_encode_pubkey_as_spki(ctx, &pubkey, &pubkey_info.direct.spki.value, &pubkey_info.direct.spki.len); LOG_TEST_RET(ctx, r, "Could not encode public key"); pubkey_info.id = key_info->id; strlcpy(pubkey_obj.label, label, sizeof(pubkey_obj.label)); if (pubkey.algorithm == SC_ALGORITHM_RSA) { pubkey_info.modulus_length = pubkey.u.rsa.modulus.len << 3; pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_ENCRYPT|SC_PKCS15_PRKEY_USAGE_VERIFY|SC_PKCS15_PRKEY_USAGE_WRAP; r = sc_pkcs15emu_add_rsa_pubkey(p15card, &pubkey_obj, &pubkey_info); } else { /* TODO fix if support for non-multiple-of-8 curves is added */ pubkey_info.field_length = cvc.primeOrModuluslen << 3; pubkey_info.usage = SC_PKCS15_PRKEY_USAGE_VERIFY; r = sc_pkcs15emu_add_ec_pubkey(p15card, &pubkey_obj, &pubkey_info); } LOG_TEST_RET(ctx, r, "Could not add public key"); sc_pkcs15emu_sc_hsm_free_cvc(&cvc); sc_pkcs15_erase_pubkey(&pubkey); return SC_SUCCESS; } /* * Add a key and the key description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_prkd(sc_pkcs15_card_t * p15card, u8 keyid) { sc_card_t *card = p15card->card; sc_pkcs15_cert_info_t cert_info; sc_pkcs15_object_t cert_obj; struct sc_pkcs15_object prkd; sc_pkcs15_prkey_info_t *key_info; u8 fid[2]; /* enough to hold a complete certificate */ u8 efbin[4096]; u8 *ptr; size_t len; int r; fid[0] = PRKD_PREFIX; fid[1] = keyid; /* Try to select a related EF containing
the PKCS#15 description of the key */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); ptr = efbin; memset(&prkd, 0, sizeof(prkd)); r = sc_pkcs15_decode_prkdf_entry(p15card, &prkd, (const u8 **)&ptr, &len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.PRKD"); /* All keys require user PIN authentication */ prkd.auth_id.len = 1; prkd.auth_id.value[0] = 1; /* * Set private key flag as all keys are private anyway */ prkd.flags |= SC_PKCS15_CO_FLAG_PRIVATE; key_info = (sc_pkcs15_prkey_info_t *)prkd.data; key_info->key_reference = keyid; key_info->path.aid.len = 0; if (prkd.type == SC_PKCS15_TYPE_PRKEY_RSA) { r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkd, key_info); } else { r = sc_pkcs15emu_add_ec_prkey(p15card, &prkd, key_info); } LOG_TEST_RET(card->ctx, r, "Could not add private key to framework"); /* Check if we also have a certificate for the private key */ fid[0] = EE_CERTIFICATE_PREFIX; len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 0); LOG_TEST_RET(card->ctx, r, "Could not read EF"); if (efbin[0] == 0x67) { /* Decode CSR and create public key object */ sc_pkcs15emu_sc_hsm_add_pubkey(p15card, efbin, len, key_info, prkd.label); free(key_info); return SC_SUCCESS; /* Ignore any errors */ } if (efbin[0] != 0x30) { free(key_info); return SC_SUCCESS; } memset(&cert_info, 0, sizeof(cert_info)); memset(&cert_obj, 0, sizeof(cert_obj)); cert_info.id = key_info->id; sc_path_set(&cert_info.path, SC_PATH_TYPE_FILE_ID, fid, 2, 0, 0); cert_info.path.count = -1; if (p15card->opts.use_file_cache) { /* look this up with our AID, which should already be cached from the * call to `read_file`. This may have the side effect that OpenSC's * caching layer re-selects our applet *if the cached file cannot be * found/used* and we may lose the authentication status. We assume * that caching works perfectly without this side effect.
*/ cert_info.path.aid = sc_hsm_aid; } strlcpy(cert_obj.label, prkd.label, sizeof(cert_obj.label)); r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info); free(key_info); LOG_TEST_RET(card->ctx, r, "Could not add certificate"); return SC_SUCCESS; } /* * Add a data object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_dcod(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_data_info_t *data_info; sc_pkcs15_object_t data_obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = DCOD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the data */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.DCOD"); ptr = efbin; memset(&data_obj, 0, sizeof(data_obj)); r = sc_pkcs15_decode_dodf_entry(p15card, &data_obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Could not decode optional EF.DCOD"); data_info = (sc_pkcs15_data_info_t *)data_obj.data; r = sc_pkcs15emu_add_data_object(p15card, &data_obj, data_info); LOG_TEST_RET(card->ctx, r, "Could not add data object to framework"); return SC_SUCCESS; } /* * Add a unrelated certificate object and description in PKCS#15 format to the framework */ static int sc_pkcs15emu_sc_hsm_add_cd(sc_pkcs15_card_t * p15card, u8 id) { sc_card_t *card = p15card->card; sc_pkcs15_cert_info_t *cert_info; sc_pkcs15_object_t obj; u8 fid[2]; u8 efbin[512]; const u8 *ptr; size_t len; int r; fid[0] = CD_PREFIX; fid[1] = id; /* Try to select a related EF containing the PKCS#15 description of the data */ len = sizeof efbin; r = read_file(p15card, fid, efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.DCOD"); ptr = efbin; memset(&obj, 0, sizeof(obj)); r = sc_pkcs15_decode_cdf_entry(p15card, &obj, &ptr, &len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.CDOD"); cert_info = (sc_pkcs15_cert_info_t *)obj.data; r = sc_pkcs15emu_add_x509_cert(p15card, &obj, cert_info); LOG_TEST_RET(card->ctx, r, "Could not add data object to framework"); return SC_SUCCESS; } static int sc_pkcs15emu_sc_hsm_read_tokeninfo (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; int r; u8 efbin[512]; size_t len; LOG_FUNC_CALLED(card->ctx); /* Read token info */ len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x03", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); r = sc_pkcs15_parse_tokeninfo(card->ctx, p15card->tokeninfo, efbin, len); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.TokenInfo"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } /* * Initialize PKCS#15 emulation with user PIN, private keys, certificate and data objects * */ static int sc_pkcs15emu_sc_hsm_init (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; sc_hsm_private_data_t *priv = (sc_hsm_private_data_t *) card->drv_data; sc_file_t *file = NULL; sc_path_t path; u8 filelist[MAX_EXT_APDU_LENGTH]; int filelistlength; int r, i; sc_cvc_t devcert; struct sc_app_info *appinfo; struct sc_pkcs15_auth_info pin_info; struct sc_pkcs15_object pin_obj; struct sc_pin_cmd_data pindata; u8 efbin[1024]; u8 *ptr; size_t len; LOG_FUNC_CALLED(card->ctx); appinfo = calloc(1, sizeof(struct sc_app_info)); if (appinfo == NULL) { LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->aid = sc_hsm_aid; appinfo->ddo.aid = sc_hsm_aid; p15card->app = appinfo; sc_path_set(&path, SC_PATH_TYPE_DF_NAME, sc_hsm_aid.value, sc_hsm_aid.len, 0, 0); r = sc_select_file(card, &path, &file); 
LOG_TEST_RET(card->ctx, r, "Could not select SmartCard-HSM application"); p15card->card->version.hw_major = 24; /* JCOP 2.4.1r3 */ p15card->card->version.hw_minor = 13; if (file && file->prop_attr && file->prop_attr_len >= 2) { p15card->card->version.fw_major = file->prop_attr[file->prop_attr_len - 2]; p15card->card->version.fw_minor = file->prop_attr[file->prop_attr_len - 1]; } sc_file_free(file); /* Read device certificate to determine serial number */ if (priv->EF_C_DevAut && priv->EF_C_DevAut_len) { ptr = priv->EF_C_DevAut; len = priv->EF_C_DevAut_len; } else { len = sizeof efbin; r = read_file(p15card, (u8 *) "\x2F\x02", efbin, &len, 1); LOG_TEST_RET(card->ctx, r, "Skipping optional EF.C_DevAut"); if (len > 0) { /* save EF_C_DevAut for further use */ ptr = realloc(priv->EF_C_DevAut, len); if (ptr) { memcpy(ptr, efbin, len); priv->EF_C_DevAut = ptr; priv->EF_C_DevAut_len = len; } } ptr = efbin; } memset(&devcert, 0 ,sizeof(devcert)); r = sc_pkcs15emu_sc_hsm_decode_cvc(p15card, (const u8 **)&ptr, &len, &devcert); LOG_TEST_RET(card->ctx, r, "Could not decode EF.C_DevAut"); sc_pkcs15emu_sc_hsm_read_tokeninfo(p15card); if (p15card->tokeninfo->label == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->label = strdup("GoID"); } else { p15card->tokeninfo->label = strdup("SmartCard-HSM"); } if (p15card->tokeninfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } if ((p15card->tokeninfo->manufacturer_id != NULL) && !strcmp("(unknown)", p15card->tokeninfo->manufacturer_id)) { free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = NULL; } if (p15card->tokeninfo->manufacturer_id == NULL) { if (p15card->card->type == SC_CARD_TYPE_SC_HSM_GOID || p15card->card->type == SC_CARD_TYPE_SC_HSM_SOC) { p15card->tokeninfo->manufacturer_id = strdup("Bundesdruckerei GmbH"); } else { p15card->tokeninfo->manufacturer_id = strdup("www.CardContact.de"); } if (p15card->tokeninfo->manufacturer_id == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } appinfo->label = strdup(p15card->tokeninfo->label); if (appinfo->label == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); len = strnlen(devcert.chr, sizeof devcert.chr); /* Strip last 5 digit sequence number from CHR */ assert(len >= 8); len -= 5; p15card->tokeninfo->serial_number = calloc(len + 1, 1); if (p15card->tokeninfo->serial_number == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(p15card->tokeninfo->serial_number, devcert.chr, len); *(p15card->tokeninfo->serial_number + len) = 0; sc_hsm_set_serialnr(card, p15card->tokeninfo->serial_number); sc_pkcs15emu_sc_hsm_free_cvc(&devcert); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 1; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x81; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_EXCHANGE_REF_DATA; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; pin_info.attrs.pin.min_length = 6; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 15; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 3; pin_info.max_tries = 3; pin_obj.auth_id.len = 1; pin_obj.auth_id.value[0] = 2; strlcpy(pin_obj.label, "UserPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE|SC_PKCS15_CO_FLAG_MODIFIABLE; r = 
sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = 2; pin_info.path.aid = sc_hsm_aid; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = 0x88; pin_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_LOCAL|SC_PKCS15_PIN_FLAG_INITIALIZED|SC_PKCS15_PIN_FLAG_UNBLOCK_DISABLED|SC_PKCS15_PIN_FLAG_SO_PIN; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_BCD; pin_info.attrs.pin.min_length = 16; pin_info.attrs.pin.stored_length = 0; pin_info.attrs.pin.max_length = 16; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = 15; pin_info.max_tries = 15; strlcpy(pin_obj.label, "SOPIN", sizeof(pin_obj.label)); pin_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE; r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) LOG_FUNC_RETURN(card->ctx, r); if (card->type == SC_CARD_TYPE_SC_HSM_SOC || card->type == SC_CARD_TYPE_SC_HSM_GOID) { /* SC-HSM of this type always has a PIN-Pad */ r = SC_SUCCESS; } else { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x85; r = sc_pin_cmd(card, &pindata, NULL); } if (r == SC_ERROR_DATA_OBJECT_NOT_FOUND) { memset(&pindata, 0, sizeof(pindata)); pindata.cmd = SC_PIN_CMD_GET_INFO; pindata.pin_type = SC_AC_CHV; pindata.pin_reference = 0x86; r = sc_pin_cmd(card, &pindata, NULL); } if ((r != SC_ERROR_DATA_OBJECT_NOT_FOUND) && (r != SC_ERROR_INCORRECT_PARAMETERS)) card->caps |= SC_CARD_CAP_PROTECTED_AUTHENTICATION_PATH; filelistlength = sc_list_files(card, filelist, sizeof(filelist)); LOG_TEST_RET(card->ctx, filelistlength, "Could not enumerate file and key identifier"); for (i = 0; i < filelistlength; i += 2) { switch(filelist[i]) { case KEY_PREFIX: r = sc_pkcs15emu_sc_hsm_add_prkd(p15card, filelist[i + 1]); break; case DCOD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_dcod(p15card, filelist[i + 1]); break; case CD_PREFIX: r = sc_pkcs15emu_sc_hsm_add_cd(p15card, filelist[i + 1]); break; } if (r != SC_SUCCESS) { sc_log(card->ctx, "Error %d adding elements to framework", r); } } LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } int sc_pkcs15emu_sc_hsm_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && (opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK)) { return sc_pkcs15emu_sc_hsm_init(p15card); } else { if (p15card->card->type != SC_CARD_TYPE_SC_HSM && p15card->card->type != SC_CARD_TYPE_SC_HSM_SOC && p15card->card->type != SC_CARD_TYPE_SC_HSM_GOID) { return SC_ERROR_WRONG_CARD; } return sc_pkcs15emu_sc_hsm_init(p15card); } }
./CrossVul/dataset_final_sorted/CWE-119/c/good_343_6
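/*
 * Editor's sketch (not part of the dataset sample above; all demo_* names
 * are hypothetical): the fixed sc-hsm code above repeatedly passes a
 * buffer capacity into read_file() via "len = sizeof efbin" and receives
 * the actual byte count back through the same pointer. A minimal model of
 * that in/out-length idiom, assuming the callee clamps to the caller's
 * capacity, looks like this:
 */
#include <stdio.h>
#include <string.h>

static const unsigned char demo_source[600] = { 0x30, 0x82 }; /* pretend EF */

/* Copy at most *len bytes; on return *len holds the bytes actually copied. */
static int demo_read_file(unsigned char *buf, size_t *len)
{
    size_t avail = sizeof demo_source;     /* bytes available on the "card" */
    int truncated = avail > *len;
    size_t n = truncated ? *len : avail;   /* never exceed caller's buffer  */
    memcpy(buf, demo_source, n);
    *len = n;
    return truncated ? -1 : 0;             /* let the caller see truncation */
}

int main(void)
{
    unsigned char efbin[512];
    size_t len = sizeof efbin;             /* capacity in ...               */
    int r = demo_read_file(efbin, &len);   /* ... actual length out         */
    printf("read %zu bytes, status %d\n", len, r);  /* read 512, status -1  */
    return 0;
}
/* Design note: keeping capacity and length in one in/out parameter makes
 * the clamp hard to forget, which is the classic CWE-119 failure mode. */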
crossvul-cpp_data_good_5733_5
/* * Copyright (c) 2007 Benoit Fouet * Copyright (c) 2010 Stefano Sabatini * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * horizontal flip filter */ #include <string.h> #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" #include "libavutil/pixdesc.h" #include "libavutil/internal.h" #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" typedef struct { int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes int hsub, vsub; ///< chroma subsampling } FlipContext; static int query_formats(AVFilterContext *ctx) { AVFilterFormats *pix_fmts = NULL; int fmt; for (fmt = 0; fmt < AV_PIX_FMT_NB; fmt++) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL || desc->flags & AV_PIX_FMT_FLAG_BITSTREAM || (desc->log2_chroma_w != desc->log2_chroma_h && desc->comp[0].plane == desc->comp[1].plane))) ff_add_format(&pix_fmts, fmt); } ff_set_common_formats(ctx, pix_fmts); return 0; } static int config_props(AVFilterLink *inlink) { FlipContext *s = inlink->dst->priv; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc); s->hsub = pix_desc->log2_chroma_w; s->vsub = pix_desc->log2_chroma_h; return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; FlipContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; AVFrame *out; uint8_t *inrow, *outrow; int i, j, plane, step; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); /* copy palette if required */ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL) memcpy(out->data[1], in->data[1], AVPALETTE_SIZE); for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { const int width = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w; const int height = (plane == 1 || plane == 2) ? 
FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h; step = s->max_step[plane]; outrow = out->data[plane]; inrow = in ->data[plane] + (width - 1) * step; for (i = 0; i < height; i++) { switch (step) { case 1: for (j = 0; j < width; j++) outrow[j] = inrow[-j]; break; case 2: { uint16_t *outrow16 = (uint16_t *)outrow; uint16_t * inrow16 = (uint16_t *) inrow; for (j = 0; j < width; j++) outrow16[j] = inrow16[-j]; } break; case 3: { uint8_t *in = inrow; uint8_t *out = outrow; for (j = 0; j < width; j++, out += 3, in -= 3) { int32_t v = AV_RB24(in); AV_WB24(out, v); } } break; case 4: { uint32_t *outrow32 = (uint32_t *)outrow; uint32_t * inrow32 = (uint32_t *) inrow; for (j = 0; j < width; j++) outrow32[j] = inrow32[-j]; } break; default: for (j = 0; j < width; j++) memcpy(outrow + j*step, inrow - j*step, step); } inrow += in ->linesize[plane]; outrow += out->linesize[plane]; } } av_frame_free(&in); return ff_filter_frame(outlink, out); } static const AVFilterPad avfilter_vf_hflip_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_props, }, { NULL } }; static const AVFilterPad avfilter_vf_hflip_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_hflip = { .name = "hflip", .description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."), .priv_size = sizeof(FlipContext), .query_formats = query_formats, .inputs = avfilter_vf_hflip_inputs, .outputs = avfilter_vf_hflip_outputs, };
./CrossVul/dataset_final_sorted/CWE-119/c/good_5733_5
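/*
 * Editor's sketch (not part of the dataset sample above; flip_row_rgb24 is
 * an illustrative name, not an FFmpeg API): the filter's inner loop walks
 * the source row backwards by the per-plane pixel step while writing the
 * destination forwards. The step==3 case, isolated for one packed RGB24
 * row, reduces to:
 */
#include <stdio.h>

static void flip_row_rgb24(unsigned char *dst, const unsigned char *src, int width)
{
    const unsigned char *in = src + (width - 1) * 3; /* last pixel of src */
    for (int x = 0; x < width; x++, dst += 3, in -= 3) {
        dst[0] = in[0];                              /* copy one pixel,   */
        dst[1] = in[1];                              /* three bytes at    */
        dst[2] = in[2];                              /* a time            */
    }
}

int main(void)
{
    unsigned char src[4 * 3] = { 1,1,1, 2,2,2, 3,3,3, 4,4,4 };
    unsigned char dst[4 * 3];
    flip_row_rgb24(dst, src, 4);
    for (int i = 0; i < 12; i += 3)
        printf("%d ", dst[i]);                       /* prints: 4 3 2 1 */
    printf("\n");
    return 0;
}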
crossvul-cpp_data_good_4956_2
/* * The Python Imaging Library. * $Id$ * * decoder for uncompressed PCD image data. * * history: * 96-05-10 fl Created * 96-05-18 fl New tables * 97-01-25 fl Use PhotoYCC unpacker * * notes: * This driver supports uncompressed PCD modes only * (resolutions up to 768x512). * * Copyright (c) Fredrik Lundh 1996-97. * Copyright (c) Secret Labs AB 1997. * * See the README file for information on usage and redistribution. */ #include "Imaging.h" int ImagingPcdDecode(Imaging im, ImagingCodecState state, UINT8* buf, int bytes) { int x; int chunk; UINT8* out; UINT8* ptr; ptr = buf; chunk = 3 * state->xsize; for (;;) { /* We need data for two full lines before we can do anything */ if (bytes < chunk) return ptr - buf; /* Unpack first line */ out = state->buffer; for (x = 0; x < state->xsize; x++) { out[0] = ptr[x]; out[1] = ptr[(x+4*state->xsize)/2]; out[2] = ptr[(x+5*state->xsize)/2]; out += 3; } state->shuffle((UINT8*) im->image[state->y], state->buffer, state->xsize); if (++state->y >= state->ysize) return -1; /* This can hardly happen */ /* Unpack second line */ out = state->buffer; for (x = 0; x < state->xsize; x++) { out[0] = ptr[x+state->xsize]; out[1] = ptr[(x+4*state->xsize)/2]; out[2] = ptr[(x+5*state->xsize)/2]; out += 3; } state->shuffle((UINT8*) im->image[state->y], state->buffer, state->xsize); if (++state->y >= state->ysize) return -1; ptr += chunk; bytes -= chunk; } }
./CrossVul/dataset_final_sorted/CWE-119/c/good_4956_2
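/*
 * Editor's sketch (not part of the dataset sample above): as the decoder's
 * index arithmetic shows, each uncompressed PCD chunk of 3*xsize bytes
 * holds two full-width luma rows followed by one half-width Cb row and one
 * half-width Cr row shared by both luma rows. Printing the offsets for a
 * toy row width makes that layout visible:
 */
#include <stdio.h>

int main(void)
{
    int xsize = 8;                    /* toy row width                    */
    int chunk = 3 * xsize;            /* bytes consumed per pair of rows  */
    int x = 5;                        /* sample column                    */

    int y0 = x;                       /* luma, first row                  */
    int y1 = x + xsize;               /* luma, second row                 */
    int cb = (x + 4 * xsize) / 2;     /* shared Cb, half horizontal res   */
    int cr = (x + 5 * xsize) / 2;     /* shared Cr, half horizontal res   */

    printf("chunk=%d  Y0=%d Y1=%d Cb=%d Cr=%d\n", chunk, y0, y1, cb, cr);
    return 0;
}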
crossvul-cpp_data_bad_3563_0
/******************************************************************************* * * This file contains the Linux/SCSI LLD virtual SCSI initiator driver * for emulated SAS initiator ports * * © Copyright 2011 RisingTide Systems LLC. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ****************************************************************************/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/configfs.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_transport.h> #include <target/target_core_fabric_ops.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_fabric_lib.h> #include <target/target_core_configfs.h> #include <target/target_core_device.h> #include <target/target_core_tpg.h> #include <target/target_core_tmr.h> #include "tcm_loop.h" #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) /* Local pointer to allocated TCM configfs fabric module */ static struct target_fabric_configfs *tcm_loop_fabric_configfs; static struct kmem_cache *tcm_loop_cmd_cache; static int tcm_loop_hba_no_cnt; /* * Allocate a tcm_loop cmd descriptor from target_core_mod code * * Can be called from interrupt context in tcm_loop_queuecommand() below */ static struct se_cmd *tcm_loop_allocate_core_cmd( struct tcm_loop_hba *tl_hba, struct se_portal_group *se_tpg, struct scsi_cmnd *sc) { struct se_cmd *se_cmd; struct se_session *se_sess; struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus; struct tcm_loop_cmd *tl_cmd; int sam_task_attr; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" " does not exist\n"); set_host_byte(sc, DID_ERROR); return NULL; } se_sess = tl_nexus->se_sess; tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); if (!tl_cmd) { printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); set_host_byte(sc, DID_ERROR); return NULL; } se_cmd = &tl_cmd->tl_se_cmd; /* * Save the pointer to struct scsi_cmnd *sc */ tl_cmd->sc = sc; /* * Locate the SAM Task Attr from struct scsi_cmnd * */ if (sc->device->tagged_supported) { switch (sc->tag) { case HEAD_OF_QUEUE_TAG: sam_task_attr = MSG_HEAD_TAG; break; case ORDERED_QUEUE_TAG: sam_task_attr = MSG_ORDERED_TAG; break; default: sam_task_attr = MSG_SIMPLE_TAG; break; } } else sam_task_attr = MSG_SIMPLE_TAG; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, &tl_cmd->tl_sense_buf[0]); /* * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi */ if (scsi_bidi_cmnd(sc)) se_cmd->t_tasks_bidi = 1; /* * Locate the struct se_lun pointer and attach it to struct se_cmd */ if 
(transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); set_host_byte(sc, DID_NO_CONNECT); return NULL; } return se_cmd; } /* * Called by struct target_core_fabric_ops->new_cmd_map() * * Always called in process context. A non zero return value * here will signal to handle an exception based on the return code. */ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; struct scatterlist *sgl_bidi = NULL; u32 sgl_bidi_count = 0; int ret; /* * Allocate the necessary tasks to complete the received CDB+data */ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); if (ret == -ENOMEM) { /* Out of Resources */ return PYX_TRANSPORT_LU_COMM_FAILURE; } else if (ret == -EINVAL) { /* * Handle case for SAM_STAT_RESERVATION_CONFLICT */ if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) return PYX_TRANSPORT_RESERVATION_CONFLICT; /* * Otherwise, return SAM_STAT_CHECK_CONDITION and return * sense data. */ return PYX_TRANSPORT_USE_SENSE_REASON; } /* * For BIDI commands, pass in the extra READ buffer * to transport_generic_map_mem_to_cmd() below.. */ if (se_cmd->t_tasks_bidi) { struct scsi_data_buffer *sdb = scsi_in(sc); sgl_bidi = sdb->table.sgl; sgl_bidi_count = sdb->table.nents; } /* * Map the SG memory into struct se_mem->page linked list using the same * physical memory at sg->page_link. */ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); if (ret < 0) return PYX_TRANSPORT_LU_COMM_FAILURE; return 0; } /* * Called from struct target_core_fabric_ops->check_stop_free() */ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) { /* * Do not release struct se_cmd's containing a valid TMR * pointer. These will be released directly in tcm_loop_device_reset() * with transport_generic_free_cmd(). 
*/ if (se_cmd->se_tmr_req) return; /* * Release the struct se_cmd, which will make a callback to release * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() */ transport_generic_free_cmd(se_cmd, 0, 0); } static void tcm_loop_release_cmd(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); } static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { return sprintf(buffer, "tcm_loop_proc_info()\n"); } static int tcm_loop_driver_probe(struct device *); static int tcm_loop_driver_remove(struct device *); static int pseudo_lld_bus_match(struct device *dev, struct device_driver *dev_driver) { return 1; } static struct bus_type tcm_loop_lld_bus = { .name = "tcm_loop_bus", .match = pseudo_lld_bus_match, .probe = tcm_loop_driver_probe, .remove = tcm_loop_driver_remove, }; static struct device_driver tcm_loop_driverfs = { .name = "tcm_loop", .bus = &tcm_loop_lld_bus, }; /* * Used with root_device_register() in tcm_loop_alloc_core_bus() below */ struct device *tcm_loop_primary; /* * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() */ static int tcm_loop_change_queue_depth( struct scsi_device *sdev, int depth, int reason) { switch (reason) { case SCSI_QDEPTH_DEFAULT: scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); break; case SCSI_QDEPTH_QFULL: scsi_track_queue_full(sdev, depth); break; case SCSI_QDEPTH_RAMP_UP: scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); break; default: return -EOPNOTSUPP; } return sdev->queue_depth; } /* * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs) */ static int tcm_loop_queuecommand( struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct se_cmd *se_cmd; struct se_portal_group *se_tpg; struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" " scsi_buf_len: %u\n", sc->device->host->host_no, sc->device->id, sc->device->channel, sc->device->lun, sc->cmnd[0], scsi_bufflen(sc)); /* * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; se_tpg = &tl_tpg->tl_se_tpg; /* * Determine the SAM Task Attribute and allocate tl_cmd and * tl_cmd->tl_se_cmd from TCM infrastructure */ se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); if (!se_cmd) { sc->scsi_done(sc); return 0; } /* * Queue up the newly allocated to be processed in TCM thread context. 
*/ transport_generic_handle_cdb_map(se_cmd); return 0; } /* * Called from SCSI EH process context to issue a LUN_RESET TMR * to struct scsi_device */ static int tcm_loop_device_reset(struct scsi_cmnd *sc) { struct se_cmd *se_cmd = NULL; struct se_portal_group *se_tpg; struct se_session *se_sess; struct tcm_loop_cmd *tl_cmd = NULL; struct tcm_loop_hba *tl_hba; struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tmr *tl_tmr = NULL; struct tcm_loop_tpg *tl_tpg; int ret = FAILED; /* * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); /* * Locate the tl_nexus and se_sess pointers */ tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) { printk(KERN_ERR "Unable to perform device reset without" " active I_T Nexus\n"); return FAILED; } se_sess = tl_nexus->se_sess; /* * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; se_tpg = &tl_tpg->tl_se_tpg; tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); return FAILED; } tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); if (!tl_tmr) { printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); goto release; } init_waitqueue_head(&tl_tmr->tl_tmr_wait); se_cmd = &tl_cmd->tl_se_cmd; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, &tl_cmd->tl_sense_buf[0]); /* * Allocate the LUN_RESET TMR */ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET); if (IS_ERR(se_cmd->se_tmr_req)) goto release; /* * Locate the underlying TCM struct se_lun from sc->device->lun */ if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) goto release; /* * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() * to wake us up. */ transport_generic_handle_tmr(se_cmd); wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); /* * The TMR LUN_RESET has completed, check the response status and * then release allocations. */ ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; release: if (se_cmd) transport_generic_free_cmd(se_cmd, 1, 0); else kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); kfree(tl_tmr); return ret; } static int tcm_loop_slave_alloc(struct scsi_device *sd) { set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); return 0; } static int tcm_loop_slave_configure(struct scsi_device *sd) { return 0; } static struct scsi_host_template tcm_loop_driver_template = { .proc_info = tcm_loop_proc_info, .proc_name = "tcm_loopback", .name = "TCM_Loopback", .queuecommand = tcm_loop_queuecommand, .change_queue_depth = tcm_loop_change_queue_depth, .eh_device_reset_handler = tcm_loop_device_reset, .can_queue = TL_SCSI_CAN_QUEUE, .this_id = -1, .sg_tablesize = TL_SCSI_SG_TABLESIZE, .cmd_per_lun = TL_SCSI_CMD_PER_LUN, .max_sectors = TL_SCSI_MAX_SECTORS, .use_clustering = DISABLE_CLUSTERING, .slave_alloc = tcm_loop_slave_alloc, .slave_configure = tcm_loop_slave_configure, .module = THIS_MODULE, }; static int tcm_loop_driver_probe(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; int error; tl_hba = to_tcm_loop_hba(dev); sh = scsi_host_alloc(&tcm_loop_driver_template, sizeof(struct tcm_loop_hba)); if (!sh) { printk(KERN_ERR "Unable to allocate struct scsi_host\n"); return -ENODEV; } tl_hba->sh = sh; /* * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata */ *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; /* * Setup single ID, Channel and LUN for now.. */ sh->max_id = 2; sh->max_lun = 0; sh->max_channel = 0; sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; error = scsi_add_host(sh, &tl_hba->dev); if (error) { printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); scsi_host_put(sh); return -ENODEV; } return 0; } static int tcm_loop_driver_remove(struct device *dev) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; tl_hba = to_tcm_loop_hba(dev); sh = tl_hba->sh; scsi_remove_host(sh); scsi_host_put(sh); return 0; } static void tcm_loop_release_adapter(struct device *dev) { struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); kfree(tl_hba); } /* * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c */ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) { int ret; tl_hba->dev.bus = &tcm_loop_lld_bus; tl_hba->dev.parent = tcm_loop_primary; tl_hba->dev.release = &tcm_loop_release_adapter; dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); ret = device_register(&tl_hba->dev); if (ret) { printk(KERN_ERR "device_register() failed for" " tl_hba->dev: %d\n", ret); return -ENODEV; } return 0; } /* * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated * tcm_loop SCSI bus. 
*/ static int tcm_loop_alloc_core_bus(void) { int ret; tcm_loop_primary = root_device_register("tcm_loop_0"); if (IS_ERR(tcm_loop_primary)) { printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); return PTR_ERR(tcm_loop_primary); } ret = bus_register(&tcm_loop_lld_bus); if (ret) { printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); goto dev_unreg; } ret = driver_register(&tcm_loop_driverfs); if (ret) { printk(KERN_ERR "driver_register() failed for" "tcm_loop_driverfs\n"); goto bus_unreg; } printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); return ret; bus_unreg: bus_unregister(&tcm_loop_lld_bus); dev_unreg: root_device_unregister(tcm_loop_primary); return ret; } static void tcm_loop_release_core_bus(void) { driver_unregister(&tcm_loop_driverfs); bus_unregister(&tcm_loop_lld_bus); root_device_unregister(tcm_loop_primary); printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); } static char *tcm_loop_get_fabric_name(void) { return "loopback"; } static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; /* * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() * time based on the protocol dependent prefix of the passed configfs group. * * Based upon tl_proto_id, TCM_Loop emulates the requested fabric * ProtocolID using target_core_fabric_lib.c symbols. */ switch (tl_hba->tl_proto_id) { case SCSI_PROTOCOL_SAS: return sas_get_fabric_proto_ident(se_tpg); case SCSI_PROTOCOL_FCP: return fc_get_fabric_proto_ident(se_tpg); case SCSI_PROTOCOL_ISCSI: return iscsi_get_fabric_proto_ident(se_tpg); default: printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } return sas_get_fabric_proto_ident(se_tpg); } static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; /* * Return the passed NAA identifier for the SAS Target Port */ return &tl_tpg->tl_hba->tl_wwn_address[0]; } static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; /* * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 * to represent the SCSI Target Port. 
*/ return tl_tpg->tl_tpgt; } static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) { return 1; } static u32 tcm_loop_get_pr_transport_id( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { case SCSI_PROTOCOL_SAS: return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, format_code, buf); case SCSI_PROTOCOL_FCP: return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, format_code, buf); case SCSI_PROTOCOL_ISCSI: return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, format_code, buf); default: printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, format_code, buf); } static u32 tcm_loop_get_pr_transport_id_len( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { case SCSI_PROTOCOL_SAS: return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, format_code); case SCSI_PROTOCOL_FCP: return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, format_code); case SCSI_PROTOCOL_ISCSI: return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, format_code); default: printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, format_code); } /* * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. */ static char *tcm_loop_parse_pr_out_transport_id( struct se_portal_group *se_tpg, const char *buf, u32 *out_tid_len, char **port_nexus_ptr) { struct tcm_loop_tpg *tl_tpg = (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { case SCSI_PROTOCOL_SAS: return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, port_nexus_ptr); case SCSI_PROTOCOL_FCP: return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, port_nexus_ptr); case SCSI_PROTOCOL_ISCSI: return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, port_nexus_ptr); default: printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, port_nexus_ptr); } /* * Returning (1) here allows for target_core_mod struct se_node_acl to be generated * based upon the incoming fabric dependent SCSI Initiator Port */ static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) { return 1; } static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) { return 0; } /* * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest */ static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) { return 0; } /* * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will * never be called for TCM_Loop by target_core_fabric_configfs.c code. 
* It has been added here as a nop for target_fabric_tf_ops_check() */ static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) { return 0; } static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( struct se_portal_group *se_tpg) { struct tcm_loop_nacl *tl_nacl; tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); if (!tl_nacl) { printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); return NULL; } return &tl_nacl->se_node_acl; } static void tcm_loop_tpg_release_fabric_acl( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl) { struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, struct tcm_loop_nacl, se_node_acl); kfree(tl_nacl); } static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) { return 1; } static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) { /* * Assume struct scsi_cmnd is not in remove state.. */ return 0; } static int tcm_loop_sess_logged_in(struct se_session *se_sess) { /* * Assume that TL Nexus is always active */ return 1; } static u32 tcm_loop_sess_get_index(struct se_session *se_sess) { return 1; } static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) { return; } static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) { return 1; } static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); return tl_cmd->sc_cmd_state; } static int tcm_loop_shutdown_session(struct se_session *se_sess) { return 0; } static void tcm_loop_close_session(struct se_session *se_sess) { return; }; static void tcm_loop_stop_session( struct se_session *se_sess, int sess_sleep, int conn_sleep) { return; } static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) { return; } static int tcm_loop_write_pending(struct se_cmd *se_cmd) { /* * Since Linux/SCSI has already sent down a struct scsi_cmnd * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array * memory, and memory has already been mapped to struct se_cmd->t_mem_list * format with transport_generic_map_mem_to_cmd(). * * We now tell TCM to add this WRITE CDB directly into the TCM storage * object execution queue. 
*/ transport_generic_process_write(se_cmd); return 0; } static int tcm_loop_write_pending_status(struct se_cmd *se_cmd) { return 0; } static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" " cdb: 0x%02x\n", sc, sc->cmnd[0]); sc->result = SAM_STAT_GOOD; set_host_byte(sc, DID_OK); sc->scsi_done(sc); return 0; } static int tcm_loop_queue_status(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" " cdb: 0x%02x\n", sc, sc->cmnd[0]); if (se_cmd->sense_buffer && ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { memcpy(sc->sense_buffer, se_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); sc->result = SAM_STAT_CHECK_CONDITION; set_driver_byte(sc, DRIVER_SENSE); } else sc->result = se_cmd->scsi_status; set_host_byte(sc, DID_OK); sc->scsi_done(sc); return 0; } static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) { struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; /* * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead * and wake up the wait_queue_head_t in tcm_loop_device_reset() */ atomic_set(&tl_tmr->tmr_complete, 1); wake_up(&tl_tmr->tl_tmr_wait); return 0; } static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) { return 0; } static u16 tcm_loop_get_fabric_sense_len(void) { return 0; } static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) { switch (tl_hba->tl_proto_id) { case SCSI_PROTOCOL_SAS: return "SAS"; case SCSI_PROTOCOL_FCP: return "FCP"; case SCSI_PROTOCOL_ISCSI: return "iSCSI"; default: break; } return "Unknown"; } /* Start items for tcm_loop_port_cit */ static int tcm_loop_port_link( struct se_portal_group *se_tpg, struct se_lun *lun) { struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; atomic_inc(&tl_tpg->tl_tpg_port_count); smp_mb__after_atomic_inc(); /* * Add Linux/SCSI struct scsi_device by HCTL */ scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); return 0; } static void tcm_loop_port_unlink( struct se_portal_group *se_tpg, struct se_lun *se_lun) { struct scsi_device *sd; struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); tl_hba = tl_tpg->tl_hba; sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); if (!sd) { printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); return; } /* * Remove Linux/SCSI struct scsi_device by HCTL */ scsi_remove_device(sd); scsi_device_put(sd); atomic_dec(&tl_tpg->tl_tpg_port_count); smp_mb__after_atomic_dec(); printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); } /* End items for tcm_loop_port_cit */ /* Start items for tcm_loop_nexus_cit */ static int tcm_loop_make_nexus( struct tcm_loop_tpg *tl_tpg, const char *name) { struct se_portal_group *se_tpg; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; struct tcm_loop_nexus *tl_nexus; int ret = -ENOMEM; if (tl_tpg->tl_hba->tl_nexus) { printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already 
exists\n"); return -EEXIST; } se_tpg = &tl_tpg->tl_se_tpg; tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); if (!tl_nexus) { printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); return -ENOMEM; } /* * Initialize the struct se_session pointer */ tl_nexus->se_sess = transport_init_session(); if (IS_ERR(tl_nexus->se_sess)) { ret = PTR_ERR(tl_nexus->se_sess); goto out; } /* * Since we are running in 'demo mode' this call with generate a * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI * Initiator port name of the passed configfs group 'name'. */ tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( se_tpg, (unsigned char *)name); if (!tl_nexus->se_sess->se_node_acl) { transport_free_session(tl_nexus->se_sess); goto out; } /* * Now, register the SAS I_T Nexus as active with the call to * transport_register_session() */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); tl_tpg->tl_hba->tl_nexus = tl_nexus; printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), name); return 0; out: kfree(tl_nexus); return ret; } static int tcm_loop_drop_nexus( struct tcm_loop_tpg *tpg) { struct se_session *se_sess; struct tcm_loop_nexus *tl_nexus; struct tcm_loop_hba *tl_hba = tpg->tl_hba; tl_nexus = tpg->tl_hba->tl_nexus; if (!tl_nexus) return -ENODEV; se_sess = tl_nexus->se_sess; if (!se_sess) return -ENODEV; if (atomic_read(&tpg->tl_tpg_port_count)) { printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" " active TPG port count: %d\n", atomic_read(&tpg->tl_tpg_port_count)); return -EPERM; } printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), tl_nexus->se_sess->se_node_acl->initiatorname); /* * Release the SCSI I_T Nexus to the emulated SAS Target Port */ transport_deregister_session(tl_nexus->se_sess); tpg->tl_hba->tl_nexus = NULL; kfree(tl_nexus); return 0; } /* End items for tcm_loop_nexus_cit */ static ssize_t tcm_loop_tpg_show_nexus( struct se_portal_group *se_tpg, char *page) { struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_nexus *tl_nexus; ssize_t ret; tl_nexus = tl_tpg->tl_hba->tl_nexus; if (!tl_nexus) return -ENODEV; ret = snprintf(page, PAGE_SIZE, "%s\n", tl_nexus->se_sess->se_node_acl->initiatorname); return ret; } static ssize_t tcm_loop_tpg_store_nexus( struct se_portal_group *se_tpg, const char *page, size_t count) { struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; int ret; /* * Shutdown the active I_T nexus if 'NULL' is passed.. */ if (!strncmp(page, "NULL", 4)) { ret = tcm_loop_drop_nexus(tl_tpg); return (!ret) ? 
count : ret; } /* * Otherwise make sure the passed virtual Initiator port WWN matches * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call * tcm_loop_make_nexus() */ if (strlen(page) >= TL_WWN_ADDR_LEN) { printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" " max: %d\n", page, TL_WWN_ADDR_LEN); return -EINVAL; } snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); ptr = strstr(i_port, "naa."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { printk(KERN_ERR "Passed SAS Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[0]; goto check_newline; } ptr = strstr(i_port, "fc."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { printk(KERN_ERR "Passed FCP Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[3]; /* Skip over "fc." */ goto check_newline; } ptr = strstr(i_port, "iqn."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[0]; goto check_newline; } printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" " %s\n", i_port); return -EINVAL; /* * Clear any trailing newline for the NAA WWN */ check_newline: if (i_port[strlen(i_port)-1] == '\n') i_port[strlen(i_port)-1] = '\0'; ret = tcm_loop_make_nexus(tl_tpg, port_ptr); if (ret < 0) return ret; return count; } TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); static struct configfs_attribute *tcm_loop_tpg_attrs[] = { &tcm_loop_tpg_nexus.attr, NULL, }; /* Start items for tcm_loop_naa_cit */ struct se_portal_group *tcm_loop_make_naa_tpg( struct se_wwn *wwn, struct config_group *group, const char *name) { struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); struct tcm_loop_tpg *tl_tpg; char *tpgt_str, *end_ptr; int ret; unsigned short int tpgt; tpgt_str = strstr(name, "tpgt_"); if (!tpgt_str) { printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" " group\n"); return ERR_PTR(-EINVAL); } tpgt_str += 5; /* Skip ahead of "tpgt_" */ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); if (tpgt > TL_TPGS_PER_HBA) { printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" " %u\n", tpgt, TL_TPGS_PER_HBA); return ERR_PTR(-EINVAL); } tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; tl_tpg->tl_hba = tl_hba; tl_tpg->tl_tpgt = tpgt; /* * Register the tl_tpg as a emulated SAS TCM Target Endpoint */ ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) return ERR_PTR(-ENOMEM); printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); return &tl_tpg->tl_se_tpg; } void tcm_loop_drop_naa_tpg( struct se_portal_group *se_tpg) { struct se_wwn *wwn = se_tpg->se_tpg_wwn; struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_hba *tl_hba; unsigned short tpgt; tl_hba = tl_tpg->tl_hba; tpgt = tl_tpg->tl_tpgt; /* * Release the I_T Nexus for the Virtual SAS link if present */ tcm_loop_drop_nexus(tl_tpg); /* * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint */ core_tpg_deregister(se_tpg); printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" " Target Port %s,t,0x%04x\n", 
tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); } /* End items for tcm_loop_naa_cit */ /* Start items for tcm_loop_cit */ struct se_wwn *tcm_loop_make_scsi_hba( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct tcm_loop_hba *tl_hba; struct Scsi_Host *sh; char *ptr; int ret, off = 0; tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); if (!tl_hba) { printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); return ERR_PTR(-ENOMEM); } /* * Determine the emulated Protocol Identifier and Target Port Name * based on the incoming configfs directory name. */ ptr = strstr(name, "naa."); if (ptr) { tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; goto check_len; } ptr = strstr(name, "fc."); if (ptr) { tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; off = 3; /* Skip over "fc." */ goto check_len; } ptr = strstr(name, "iqn."); if (ptr) { tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; goto check_len; } printk(KERN_ERR "Unable to locate prefix for emulated Target Port:" " %s\n", name); return ERR_PTR(-EINVAL); check_len: if (strlen(name) >= TL_WWN_ADDR_LEN) { printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN); kfree(tl_hba); return ERR_PTR(-EINVAL); } snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); /* * Call device_register(tl_hba->dev) to register the emulated * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after * device_register() callbacks in tcm_loop_driver_probe() */ ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); if (ret) goto out; sh = tl_hba->sh; tcm_loop_hba_no_cnt++; printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" " %s Address: %s at Linux/SCSI Host ID: %d\n", tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); return &tl_hba->tl_hba_wwn; out: kfree(tl_hba); return ERR_PTR(ret); } void tcm_loop_drop_scsi_hba( struct se_wwn *wwn) { struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); int host_no = tl_hba->sh->host_no; /* * Call device_unregister() on the original tl_hba->dev. 
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will * release *tl_hba; */ device_unregister(&tl_hba->dev); printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" " SAS Address: %s at Linux/SCSI Host ID: %d\n", config_item_name(&wwn->wwn_group.cg_item), host_no); } /* Start items for tcm_loop_cit */ static ssize_t tcm_loop_wwn_show_attr_version( struct target_fabric_configfs *tf, char *page) { return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); } TF_WWN_ATTR_RO(tcm_loop, version); static struct configfs_attribute *tcm_loop_wwn_attrs[] = { &tcm_loop_wwn_version.attr, NULL, }; /* End items for tcm_loop_cit */ static int tcm_loop_register_configfs(void) { struct target_fabric_configfs *fabric; struct config_group *tf_cg; int ret; /* * Set the TCM Loop HBA counter to zero */ tcm_loop_hba_no_cnt = 0; /* * Register the top level struct config_item_type with TCM core */ fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); if (IS_ERR(fabric)) { printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); return PTR_ERR(fabric); } /* * Setup the fabric API of function pointers used by target_core_mod */ fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; fabric->tf_ops.tpg_get_pr_transport_id_len = &tcm_loop_get_pr_transport_id_len; fabric->tf_ops.tpg_parse_pr_out_transport_id = &tcm_loop_parse_pr_out_transport_id; fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; fabric->tf_ops.tpg_check_demo_mode_cache = &tcm_loop_check_demo_mode_cache; fabric->tf_ops.tpg_check_demo_mode_write_protect = &tcm_loop_check_demo_mode_write_protect; fabric->tf_ops.tpg_check_prod_mode_write_protect = &tcm_loop_check_prod_mode_write_protect; /* * The TCM loopback fabric module runs in demo-mode to a local * virtual SCSI device, so fabric dependent initator ACLs are * not required. 
*/ fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; fabric->tf_ops.tpg_release_fabric_acl = &tcm_loop_tpg_release_fabric_acl; fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; /* * Used for setting up remaining TCM resources in process context */ fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; fabric->tf_ops.close_session = &tcm_loop_close_session; fabric->tf_ops.stop_session = &tcm_loop_stop_session; fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; fabric->tf_ops.sess_get_initiator_sid = NULL; fabric->tf_ops.write_pending = &tcm_loop_write_pending; fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; /* * Not used for TCM loopback */ fabric->tf_ops.set_default_node_attributes = &tcm_loop_set_default_node_attributes; fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; fabric->tf_ops.queue_status = &tcm_loop_queue_status; fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; tf_cg = &fabric->tf_group; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c */ fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; /* * fabric_post_link() and fabric_pre_unlink() are used for * registration and release of TCM Loop Virtual SCSI LUNs. */ fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; fabric->tf_ops.fabric_make_np = NULL; fabric->tf_ops.fabric_drop_np = NULL; /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; /* * Once fabric->tf_ops has been setup, now register the fabric for * use within TCM */ ret = target_fabric_configfs_register(fabric); if (ret < 0) { printk(KERN_ERR "target_fabric_configfs_register() for" " TCM_Loop failed!\n"); target_fabric_configfs_free(fabric); return -1; } /* * Setup our local pointer to *fabric. 
*/ tcm_loop_fabric_configfs = fabric; printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" " tcm_loop_fabric_configfs\n"); return 0; } static void tcm_loop_deregister_configfs(void) { if (!tcm_loop_fabric_configfs) return; target_fabric_configfs_deregister(tcm_loop_fabric_configfs); tcm_loop_fabric_configfs = NULL; printk(KERN_INFO "TCM_LOOP[0] - Cleared" " tcm_loop_fabric_configfs\n"); } static int __init tcm_loop_fabric_init(void) { int ret; tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", sizeof(struct tcm_loop_cmd), __alignof__(struct tcm_loop_cmd), 0, NULL); if (!tcm_loop_cmd_cache) { printk(KERN_ERR "kmem_cache_create() for" " tcm_loop_cmd_cache failed\n"); return -ENOMEM; } ret = tcm_loop_alloc_core_bus(); if (ret) return ret; ret = tcm_loop_register_configfs(); if (ret) { tcm_loop_release_core_bus(); return ret; } return 0; } static void __exit tcm_loop_fabric_exit(void) { tcm_loop_deregister_configfs(); tcm_loop_release_core_bus(); kmem_cache_destroy(tcm_loop_cmd_cache); } MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); MODULE_LICENSE("GPL"); module_init(tcm_loop_fabric_init); module_exit(tcm_loop_fabric_exit);
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3563_0
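/*
 * Editor's sketch (not part of the dataset sample above; SLOTS and lookup
 * are illustrative names): the sample's tcm_loop_make_naa_tpg() rejects
 * only "tpgt > TL_TPGS_PER_HBA" before indexing tl_hba->tl_hba_tpgs[tpgt].
 * Assuming, as the bound's name suggests, that the array holds
 * TL_TPGS_PER_HBA entries, tpgt == TL_TPGS_PER_HBA slips through and lands
 * one element past the end. The general shape of that off-by-one range
 * check:
 */
#include <stdio.h>

#define SLOTS 32

static int table[SLOTS];

static int lookup(unsigned idx)
{
    if (idx >= SLOTS)      /* ">" here would admit idx == SLOTS and read */
        return -1;         /* one element past the end of table[]        */
    return table[idx];
}

int main(void)
{
    printf("%d %d\n", lookup(31), lookup(32));   /* prints: 0 -1 */
    return 0;
}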
crossvul-cpp_data_good_159_1
/* * This file is part of Espruino, a JavaScript interpreter for Microcontrollers * * Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * ---------------------------------------------------------------------------- * Recursive descent parser for code execution * ---------------------------------------------------------------------------- */ #include "jsparse.h" #include "jsinteractive.h" #include "jswrapper.h" #include "jsnative.h" #include "jswrap_object.h" // for function_replacewith #include "jswrap_functions.h" // insane check for eval in jspeFunctionCall #include "jswrap_json.h" // for jsfPrintJSON #include "jswrap_espruino.h" // for jswrap_espruino_memoryArea #ifndef SAVE_ON_FLASH #include "jswrap_regexp.h" // for jswrap_regexp_constructor #endif /* Info about execution when Parsing - this saves passing it on the stack * for each call */ JsExecInfo execInfo; // ----------------------------------------------- Forward decls JsVar *jspeAssignmentExpression(); JsVar *jspeExpression(); JsVar *jspeUnaryExpression(); void jspeBlock(); void jspeBlockNoBrackets(); JsVar *jspeStatement(); JsVar *jspeFactor(); void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName); #ifndef SAVE_ON_FLASH JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a); #endif // ----------------------------------------------- Utils #define JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, CLEANUP_CODE, RETURN_VAL) { if (!jslMatch((TOKEN))) { CLEANUP_CODE; return RETURN_VAL; } } #define JSP_MATCH_WITH_RETURN(TOKEN, RETURN_VAL) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , RETURN_VAL) #define JSP_MATCH(TOKEN) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , 0) // Match where the user could have given us the wrong token #define JSP_ASSERT_MATCH(TOKEN) { assert(lex->tk==(TOKEN));jslGetNextToken(); } // Match where if we have the wrong token, it's an internal error #define JSP_SHOULD_EXECUTE (((execInfo.execute)&EXEC_RUN_MASK)==EXEC_YES) #define JSP_SAVE_EXECUTE() JsExecFlags oldExecute = execInfo.execute #define JSP_RESTORE_EXECUTE() execInfo.execute = (execInfo.execute&(JsExecFlags)(~EXEC_SAVE_RESTORE_MASK)) | (oldExecute&EXEC_SAVE_RESTORE_MASK); #define JSP_HAS_ERROR (((execInfo.execute)&EXEC_ERROR_MASK)!=0) #define JSP_SHOULDNT_PARSE (((execInfo.execute)&EXEC_NO_PARSE_MASK)!=0) ALWAYS_INLINE void jspDebuggerLoopIfCtrlC() { #ifdef USE_DEBUGGER if (execInfo.execute & EXEC_CTRL_C_WAIT && JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } /// if interrupting execution, this is set bool jspIsInterrupted() { return (execInfo.execute & EXEC_INTERRUPTED)!=0; } /// if interrupting execution, this is set void jspSetInterrupted(bool interrupt) { if (interrupt) execInfo.execute = execInfo.execute | EXEC_INTERRUPTED; else execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_INTERRUPTED; } /// Set the error flag - set lineReported if we've already output the line number void jspSetError(bool lineReported) { execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_YES) | EXEC_ERROR; if (lineReported) execInfo.execute |= EXEC_ERROR_LINE_REPORTED; } bool jspHasError() { return JSP_HAS_ERROR; } void jspReplaceWith(JsVar *dst, JsVar *src) { // If this is an index in an array buffer, write directly into the array buffer if (jsvIsArrayBufferName(dst)) { size_t idx = (size_t)jsvGetInteger(dst); JsVar *arrayBuffer = jsvLock(jsvGetFirstChild(dst)); 
jsvArrayBufferSet(arrayBuffer, idx, src); jsvUnLock(arrayBuffer); return; } // if destination isn't there, isn't a 'name', or is used, give an error if (!jsvIsName(dst)) { jsExceptionHere(JSET_ERROR, "Unable to assign value to non-reference %t", dst); return; } jsvSetValueOfName(dst, src); /* If dst is flagged as a new child, it means that * it was previously undefined, and we need to add it to * the given object when it is set. */ if (jsvIsNewChild(dst)) { // Get what it should have been a child of JsVar *parent = jsvLock(jsvGetNextSibling(dst)); if (!jsvIsString(parent)) { // if we can't find a char in a string we still return a NewChild, // but we can't add character back in if (!jsvHasChildren(parent)) { jsExceptionHere(JSET_ERROR, "Field or method \"%s\" does not already exist, and can't create it on %t", dst, parent); } else { // Remove the 'new child' flagging jsvUnRef(parent); jsvSetNextSibling(dst, 0); jsvUnRef(parent); jsvSetPrevSibling(dst, 0); // Add to the parent jsvAddName(parent, dst); } } jsvUnLock(parent); } } bool jspeiAddScope(JsVar *scope) { if (execInfo.scopeCount >= JSPARSE_MAX_SCOPES) { jsExceptionHere(JSET_ERROR, "Maximum number of scopes exceeded"); jspSetError(false); return false; } execInfo.scopes[execInfo.scopeCount++] = jsvLockAgain(scope); return true; } void jspeiRemoveScope() { if (execInfo.scopeCount <= 0) { jsExceptionHere(JSET_INTERNALERROR, "Too many scopes removed"); jspSetError(false); return; } jsvUnLock(execInfo.scopes[--execInfo.scopeCount]); } JsVar *jspeiFindInScopes(const char *name) { int i; for (i=execInfo.scopeCount-1;i>=0;i--) { JsVar *ref = jsvFindChildFromString(execInfo.scopes[i], name, false); if (ref) return ref; } return jsvFindChildFromString(execInfo.root, name, false); } // TODO: get rid of these, use jspeiGetTopScope instead JsVar *jspeiFindOnTop(const char *name, bool createIfNotFound) { if (execInfo.scopeCount>0) return jsvFindChildFromString(execInfo.scopes[execInfo.scopeCount-1], name, createIfNotFound); return jsvFindChildFromString(execInfo.root, name, createIfNotFound); } JsVar *jspeiFindNameOnTop(JsVar *childName, bool createIfNotFound) { if (execInfo.scopeCount>0) return jsvFindChildFromVar(execInfo.scopes[execInfo.scopeCount-1], childName, createIfNotFound); return jsvFindChildFromVar(execInfo.root, childName, createIfNotFound); } /** Here we assume that we have already looked in the parent itself - * and are now going down looking at the stuff it inherited */ JsVar *jspeiFindChildFromStringInParents(JsVar *parent, const char *name) { if (jsvIsObject(parent)) { // If an object, look for an 'inherits' var JsVar *inheritsFrom = jsvObjectGetChild(parent, JSPARSE_INHERITS_VAR, 0); // if there's no inheritsFrom, just default to 'Object.prototype' if (!inheritsFrom) { JsVar *obj = jsvObjectGetChild(execInfo.root, "Object", 0); if (obj) { inheritsFrom = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); jsvUnLock(obj); } } if (inheritsFrom && inheritsFrom!=parent) { // we have what it inherits from (this is ACTUALLY the prototype var) // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/proto JsVar *child = jsvFindChildFromString(inheritsFrom, name, false); if (!child) child = jspeiFindChildFromStringInParents(inheritsFrom, name); jsvUnLock(inheritsFrom); if (child) return child; } else jsvUnLock(inheritsFrom); } else { // Not actually an object - but might be an array/string/etc const char *objectName = jswGetBasicObjectName(parent); while (objectName) { JsVar *objName = 
jsvFindChildFromString(execInfo.root, objectName, false); if (objName) { JsVar *result = 0; JsVar *obj = jsvSkipNameAndUnLock(objName); // could be something the user has made - eg. 'Array=1' if (jsvHasChildren(obj)) { // We have found an object with this name - search for the prototype var JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); if (proto) { result = jsvFindChildFromString(proto, name, false); jsvUnLock(proto); } } jsvUnLock(obj); if (result) return result; } /* We haven't found anything in the actual object, we should check the 'Object' itself eg, we tried 'String', so now we should try 'Object'. Built-in types don't have room for a prototype field, so we hard-code it */ objectName = jswGetBasicObjectPrototypeName(objectName); } } // no luck! return 0; } JsVar *jspeiGetScopesAsVar() { if (execInfo.scopeCount==0) return 0; if (execInfo.scopeCount==1) return jsvLockAgain(execInfo.scopes[0]); JsVar *arr = jsvNewEmptyArray(); int i; for (i=0;i<execInfo.scopeCount;i++) { JsVar *idx = jsvMakeIntoVariableName(jsvNewFromInteger(i), execInfo.scopes[i]); if (!idx) { // out of memory jspSetError(false); return arr; } jsvAddName(arr, idx); jsvUnLock(idx); } return arr; } void jspeiLoadScopesFromVar(JsVar *arr) { execInfo.scopeCount = 0; if (jsvIsArray(arr)) { JsvObjectIterator it; jsvObjectIteratorNew(&it, arr); while (jsvObjectIteratorHasValue(&it)) { execInfo.scopes[execInfo.scopeCount++] = jsvObjectIteratorGetValue(&it); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); } else execInfo.scopes[execInfo.scopeCount++] = jsvLockAgain(arr); } // ----------------------------------------------- bool jspCheckStackPosition() { if (jsuGetFreeStack() < 512) { // giving us 512 bytes leeway jsExceptionHere(JSET_ERROR, "Too much recursion - the stack is about to overflow"); jspSetInterrupted(true); return false; } return true; } // Set execFlags such that we are not executing void jspSetNoExecute() { execInfo.execute = (execInfo.execute & (JsExecFlags)(int)~EXEC_RUN_MASK) | EXEC_NO; } void jspAppendStackTrace(JsVar *stackTrace) { JsvStringIterator it; jsvStringIteratorNew(&it, stackTrace, 0); jsvStringIteratorGotoEnd(&it); jslPrintPosition((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart); jslPrintTokenLineMarker((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart, 0); jsvStringIteratorFree(&it); } /// We had an exception (argument is the exception's value) void jspSetException(JsVar *value) { // Add the exception itself to a variable in root scope JsVar *exception = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, true); if (exception) { jsvSetValueOfName(exception, value); jsvUnLock(exception); } // Set the exception flag execInfo.execute = execInfo.execute | EXEC_EXCEPTION; // Try and do a stack trace if (lex) { JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, " at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); // stop us from printing the trace in the same block execInfo.execute = execInfo.execute | EXEC_ERROR_LINE_REPORTED; } } } /** Return the reported exception if there was one (and clear it) */ JsVar *jspGetException() { JsVar *exceptionName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, false); if (exceptionName) { JsVar *exception = jsvSkipName(exceptionName); jsvRemoveChild(execInfo.hiddenRoot, exceptionName); jsvUnLock(exceptionName); JsVar *stack = 
jspGetStackTrace(); if (stack && jsvHasChildren(exception)) { jsvObjectSetChild(exception, "stack", stack); } jsvUnLock(stack); return exception; } return 0; } /** Return a stack trace string if there was one (and clear it) */ JsVar *jspGetStackTrace() { JsVar *stackTraceName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, false); if (stackTraceName) { JsVar *stackTrace = jsvSkipName(stackTraceName); jsvRemoveChild(execInfo.hiddenRoot, stackTraceName); jsvUnLock(stackTraceName); return stackTrace; } return 0; } // ---------------------------------------------- // we return a value so that JSP_MATCH can return 0 if it fails (if we pass 0, we just parse all args) NO_INLINE bool jspeFunctionArguments(JsVar *funcVar) { JSP_MATCH('('); while (lex->tk!=')') { if (funcVar) { char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; strcpy(&buf[1], jslGetTokenValueAsString(lex)); JsVar *param = jsvAddNamedChild(funcVar, 0, buf); if (!param) { // out of memory jspSetError(false); return false; } jsvMakeFunctionParameter(param); // force this to be called a function parameter jsvUnLock(param); } JSP_MATCH(LEX_ID); if (lex->tk!=')') JSP_MATCH(','); } JSP_MATCH(')'); return true; } // Parse function, assuming we're on '{'. funcVar can be 0 NO_INLINE bool jspeFunctionDefinitionInternal(JsVar *funcVar, bool expressionOnly) { if (expressionOnly) { if (funcVar) funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; } else { JSP_MATCH('{'); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_STR && !strcmp(jslGetTokenValueAsString(lex), "compiled")) { jsWarn("Function marked with \"compiled\" uploaded in source form"); } #endif /* If the function starts with return, treat it specially - * we don't want to store the 'return' part of it */ if (funcVar && lex->tk==LEX_R_RETURN) { funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; JSP_ASSERT_MATCH(LEX_R_RETURN); } } // Get the line number (if needed) JsVarInt lineNumber = 0; if (funcVar && lex->lineNumberOffset) { // jslGetLineNumber is slow, so we only do it if we have debug info lineNumber = (JsVarInt)jslGetLineNumber(lex) + (JsVarInt)lex->lineNumberOffset - 1; } // Get the code - parse it and figure out where it stops JslCharPos funcBegin = jslCharPosClone(&lex->tokenStart); int lastTokenEnd = -1; if (!expressionOnly) { int brackets = 0; while (lex->tk && (brackets || lex->tk != '}')) { if (lex->tk == '{') brackets++; if (lex->tk == '}') brackets--; lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->it)-1; JSP_ASSERT_MATCH(lex->tk); } } else { JsExecFlags oldExec = execInfo.execute; execInfo.execute = EXEC_NO; jsvUnLock(jspeAssignmentExpression()); execInfo.execute = oldExec; lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->tokenStart.it)-1; } // Then create var and set (if there was any code!) if (funcVar && lastTokenEnd>0) { // code var JsVar *funcCodeVar; if (jsvIsNativeString(lex->sourceVar)) { /* If we're parsing from a Native String (eg. 
E.memoryArea, E.setBootCode) then use another Native String to load function code straight from flash */ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewNativeString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); } else { if (jsfGetFlag(JSF_PRETOKENISE)) { funcCodeVar = jslNewTokenisedStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } else { funcCodeVar = jslNewStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } } jsvUnLock2(jsvAddNamedChild(funcVar, funcCodeVar, JSPARSE_FUNCTION_CODE_NAME), funcCodeVar); // scope var JsVar *funcScopeVar = jspeiGetScopesAsVar(); if (funcScopeVar) { jsvUnLock2(jsvAddNamedChild(funcVar, funcScopeVar, JSPARSE_FUNCTION_SCOPE_NAME), funcScopeVar); } // If we've got a line number, add a var for it if (lineNumber) { JsVar *funcLineNumber = jsvNewFromInteger(lineNumber); if (funcLineNumber) { jsvUnLock2(jsvAddNamedChild(funcVar, funcLineNumber, JSPARSE_FUNCTION_LINENUMBER_NAME), funcLineNumber); } } } jslCharPosFree(&funcBegin); if (!expressionOnly) JSP_MATCH('}'); return 0; } // Parse function (after 'function' has occurred NO_INLINE JsVar *jspeFunctionDefinition(bool parseNamedFunction) { // actually parse a function... We assume that the LEX_FUNCTION and name // have already been parsed JsVar *funcVar = 0; bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) funcVar = jsvNewWithFlags(JSV_FUNCTION); JsVar *functionInternalName = 0; if (parseNamedFunction && lex->tk==LEX_ID) { // you can do `var a = function foo() { foo(); };` - so cope with this if (funcVar) functionInternalName = jslGetTokenValueAsVar(lex); // note that we don't add it to the beginning, because it would mess up our function call code JSP_ASSERT_MATCH(LEX_ID); } // Get arguments save them to the structure if (!jspeFunctionArguments(funcVar)) { jsvUnLock2(functionInternalName, funcVar); // parse failed return 0; } // Parse the actual function block jspeFunctionDefinitionInternal(funcVar, false); // if we had a function name, add it to the end (if we don't it gets confused with arguments) if (funcVar && functionInternalName) jsvObjectSetChildAndUnLock(funcVar, JSPARSE_FUNCTION_NAME_NAME, functionInternalName); return funcVar; } /* Parse just the brackets of a function - and throw * everything away */ NO_INLINE bool jspeParseFunctionCallBrackets() { assert(!JSP_SHOULD_EXECUTE); JSP_MATCH('('); while (!JSP_SHOULDNT_PARSE && lex->tk != ')') { jsvUnLock(jspeAssignmentExpression()); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ARROW_FUNCTION) { jsvUnLock(jspeArrowFunction(0, 0)); } #endif if (lex->tk!=')') JSP_MATCH(','); } if (!JSP_SHOULDNT_PARSE) JSP_MATCH(')'); return 0; } /** Handle a function call (assumes we've parsed the function name and we're * on the start bracket). 
'thisArg' is the value of the 'this' variable when the * function is executed (it's usually the parent object) * * * NOTE: this does not set the execInfo flags - so if execInfo==EXEC_NO, it won't execute * * If !isParsing and arg0!=0, argument 0 is set to what is supplied (same with arg1) * * functionName is used only for error reporting - and can be 0 */ NO_INLINE JsVar *jspeFunctionCall(JsVar *function, JsVar *functionName, JsVar *thisArg, bool isParsing, int argCount, JsVar **argPtr) { if (JSP_SHOULD_EXECUTE && !function) { if (functionName) jsExceptionHere(JSET_ERROR, "Function %q not found!", functionName); else jsExceptionHere(JSET_ERROR, "Function not found!", functionName); return 0; } if (JSP_SHOULD_EXECUTE) if (!jspCheckStackPosition()) return 0; // try and ensure that we won't overflow our stack if (JSP_SHOULD_EXECUTE && function) { JsVar *returnVar = 0; if (!jsvIsFunction(function)) { jsExceptionHere(JSET_ERROR, "Expecting a function to call, got %t", function); return 0; } JsVar *thisVar = jsvLockAgainSafe(thisArg); if (isParsing) JSP_MATCH('('); /* Ok, so we have 4 options here. * * 1: we're native. * a) args have been pre-parsed, which is awesome * b) we have to parse our own args into an array * 2: we're not native * a) args were pre-parsed and we have to populate the function * b) we parse our own args, which is possibly better */ if (jsvIsNative(function)) { // ------------------------------------- NATIVE unsigned int argPtrSize = 0; int boundArgs = 0; // Add 'bound' parameters if there were any JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); while (jsvIsFunctionParameter(param)) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack if needed unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16; JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } // if we already had arguments - shift them up... 
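        /* eg. for a native function with bound values (typically added by
         * Function.bind) the bound value is slotted in at index boundArgs,
         * after shifting any arguments already in argPtr one place right. */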
int i; for (i=argCount-1;i>=boundArgs;i--) argPtr[i+1] = argPtr[i]; // add bound argument argPtr[boundArgs] = jsvSkipName(param); argCount++; boundArgs++; jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } // check if 'this' was defined while (param) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); break; } jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } jsvUnLock(param); jsvObjectIteratorFree(&it); // Now, if we're parsing add the rest of the arguments int allocatedArgCount = boundArgs; if (isParsing) { while (!JSP_HAS_ERROR && lex->tk!=')' && lex->tk!=LEX_EOF) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16; JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } argPtr[argCount++] = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',',jsvUnLockMany((unsigned)argCount, argPtr);jsvUnLock(thisVar);, 0); } JSP_MATCH(')'); allocatedArgCount = argCount; } void *nativePtr = jsvGetNativeFunctionPtr(function); JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else { if (nativePtr==jswrap_eval) { // eval gets to use the current scope /* Note: proper JS has some utterly insane code that depends on whether * eval is an lvalue or not: * * http://stackoverflow.com/questions/9107240/1-evalthis-vs-evalthis-in-javascript * * Doing this in Espruino is quite an upheaval for that one * slightly insane case - so it's not implemented. */ if (execInfo.thisVar) execInfo.thisVar = jsvRef(execInfo.thisVar); } else { execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root } } if (nativePtr && !JSP_HAS_ERROR) { returnVar = jsnCallFunction(nativePtr, function->varData.native.argTypes, thisVar, argPtr, argCount); } else { returnVar = 0; } // unlock values if we locked them jsvUnLockMany((unsigned)allocatedArgCount, argPtr); /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; } else { // ----------------------------------------------------- NOT NATIVE // create a new symbol table entry for execution of this function // OPT: can we cache this function execution environment + param variables? // OPT: Probably when calling a function ONCE, use it, otherwise when recursing, make new? JsVar *functionRoot = jsvNewWithFlags(JSV_FUNCTION); if (!functionRoot) { // out of memory jspSetError(false); jsvUnLock(thisVar); return 0; } JsVar *functionScope = 0; JsVar *functionCode = 0; JsVar *functionInternalName = 0; uint16_t functionLineNumber = 0; /** NOTE: We expect that the function object will have: * * * Parameters * * Code/Scope/Name * * IN THAT ORDER. 
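       *
       * eg. `function f(a,b) {}` stores parameters 'a' and 'b' first (their
       * names prefixed with '\xFF' by jspeFunctionArguments), then the
       * code/scope/name children - which is why the iterator below reads
       * parameters first.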
*/ JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); JsVar *value = jsvObjectIteratorGetValue(&it); while (jsvIsFunctionParameter(param) && value) { JsVar *paramName = jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH); if (paramName) { // could be out of memory jsvMakeFunctionParameter(paramName); // force this to be called a function parameter jsvSetValueOfName(paramName, value); jsvAddName(functionRoot, paramName); jsvUnLock(paramName); } else jspSetError(false); jsvUnLock2(value, param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); value = jsvObjectIteratorGetValue(&it); } jsvUnLock2(value, param); if (isParsing) { int hadParams = 0; // grab in all parameters. We go around this loop until we've run out // of named parameters AND we've parsed all the supplied arguments while (!JSP_SHOULDNT_PARSE && lex->tk!=')') { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); if (lex->tk!=')' || paramDefined) { hadParams++; JsVar *value = 0; // ONLY parse this if it was supplied, otherwise leave 0 (undefined) if (lex->tk!=')') value = jspeAssignmentExpression(); // and if execute, copy it over value = jsvSkipNameAndUnLock(value); JsVar *paramName = paramDefined ? jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH) : jsvNewFromEmptyString(); if (paramName) { // could be out of memory jsvMakeFunctionParameter(paramName); // force this to be called a function parameter jsvSetValueOfName(paramName, value); jsvAddName(functionRoot, paramName); jsvUnLock(paramName); } else jspSetError(false); jsvUnLock(value); if (lex->tk!=')') JSP_MATCH(','); } jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } JSP_MATCH(')'); } else { // and NOT isParsing int args = 0; while (args<argCount) { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); JsVar *paramName = paramDefined ? jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH) : jsvNewFromEmptyString(); if (paramName) { jsvMakeFunctionParameter(paramName); // force this to be called a function parameter jsvSetValueOfName(paramName, argPtr[args]); jsvAddName(functionRoot, paramName); jsvUnLock(paramName); } else jspSetError(false); args++; jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } } // Now go through what's left while (jsvObjectIteratorHasValue(&it)) { JsVar *param = jsvObjectIteratorGetKey(&it); if (jsvIsString(param)) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_SCOPE_NAME)) functionScope = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_CODE_NAME)) functionCode = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_NAME_NAME)) functionInternalName = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); } else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_LINENUMBER_NAME)) functionLineNumber = (uint16_t)jsvGetIntegerAndUnLock(jsvSkipName(param)); else if (jsvIsFunctionParameter(param)) { JsVar *paramName = jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH); // paramName is already a name (it's a function parameter) if (paramName) {// could be out of memory - or maybe just not supplied! 
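          /* A declared parameter that received no argument still ends up as
           * a named child of functionRoot here, picking up any default value
           * stored against the function itself. */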
jsvMakeFunctionParameter(paramName); JsVar *defaultVal = jsvSkipName(param); if (defaultVal) jsvUnLock(jsvSetValueOfName(paramName, defaultVal)); jsvAddName(functionRoot, paramName); jsvUnLock(paramName); } } } jsvUnLock(param); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); // setup a the function's name (if a named function) if (functionInternalName) { JsVar *name = jsvMakeIntoVariableName(jsvNewFromStringVar(functionInternalName,0,JSVAPPENDSTRINGVAR_MAXLENGTH), function); jsvAddName(functionRoot, name); jsvUnLock2(name, functionInternalName); } if (!JSP_HAS_ERROR) { // save old scopes JsVar *oldScopes[JSPARSE_MAX_SCOPES]; int oldScopeCount; int i; oldScopeCount = execInfo.scopeCount; for (i=0;i<execInfo.scopeCount;i++) oldScopes[i] = execInfo.scopes[i]; // if we have a scope var, load it up. We may not have one if there were no scopes apart from root if (functionScope) { jspeiLoadScopesFromVar(functionScope); jsvUnLock(functionScope); } else { // no scope var defined? We have no scopes at all! execInfo.scopeCount = 0; } // add the function's execute space to the symbol table so we can recurse if (jspeiAddScope(functionRoot)) { /* Adding scope may have failed - we may have descended too deep - so be sure * not to pull somebody else's scope off */ JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root /* we just want to execute the block, but something could * have messed up and left us with the wrong Lexer, so * we want to be careful here... */ if (functionCode) { #ifdef USE_DEBUGGER bool hadDebuggerNextLineOnly = false; if (execInfo.execute&EXEC_DEBUGGER_STEP_INTO) { if (functionName) jsiConsolePrintf("Stepping into %v\n", functionName); else jsiConsolePrintf("Stepping into function\n", functionName); } else { hadDebuggerNextLineOnly = execInfo.execute&EXEC_DEBUGGER_NEXT_LINE; if (hadDebuggerNextLineOnly) execInfo.execute &= (JsExecFlags)~EXEC_DEBUGGER_NEXT_LINE; } #endif JsLex newLex; JsLex *oldLex = jslSetLex(&newLex); jslInit(functionCode); newLex.lineNumberOffset = functionLineNumber; JSP_SAVE_EXECUTE(); // force execute without any previous state #ifdef USE_DEBUGGER execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK|EXEC_DEBUGGER_NEXT_LINE)); #else execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK)); #endif if (jsvIsFunctionReturn(function)) { #ifdef USE_DEBUGGER // we didn't parse a statement so wouldn't trigger the debugger otherwise if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = jsvStringIteratorGetIndex(&lex->tokenStart.it)-1; jsiDebuggerLoop(); } #endif // implicit return - we just need an expression (optional) if (lex->tk != ';' && lex->tk != '}') returnVar = jsvSkipNameAndUnLock(jspeExpression()); } else { // setup a return variable JsVar *returnVarName = jsvAddNamedChild(functionRoot, 0, JSPARSE_RETURN_VAR); // parse the whole block jspeBlockNoBrackets(); /* get the real return var before we remove it from our function. * We can unlock below because returnVarName is still part of * functionRoot, so won't get freed. 
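           *
           * (JSPARSE_RETURN_VAR is the hidden child that a `return` statement
           * assigns to - skipping its name here yields the actual return
           * value.)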
*/ returnVar = jsvSkipNameAndUnLock(returnVarName); if (returnVarName) // could have failed with out of memory jsvSetValueOfName(returnVarName, 0); // remove return value (which helps stops circular references) } // Store a stack trace if we had an error JsExecFlags hasError = execInfo.execute&EXEC_ERROR_MASK; JSP_RESTORE_EXECUTE(); // because return will probably have set execute to false #ifdef USE_DEBUGGER bool calledDebugger = false; if (execInfo.execute & EXEC_DEBUGGER_MASK) { jsiConsolePrint("Value returned is ="); jsfPrintJSON(returnVar, JSON_LIMIT | JSON_SOME_NEWLINES | JSON_PRETTY | JSON_SHOW_DEVICES); jsiConsolePrintChar('\n'); if (execInfo.execute & EXEC_DEBUGGER_FINISH_FUNCTION) { calledDebugger = true; jsiDebuggerLoop(); } } if (hadDebuggerNextLineOnly && !calledDebugger) execInfo.execute |= EXEC_DEBUGGER_NEXT_LINE; #endif jslKill(); jslSetLex(oldLex); if (hasError) { execInfo.execute |= hasError; // propogate error JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, jsvIsString(functionName)?"in function %q called from ": "in function called from ", functionName); if (lex) { jspAppendStackTrace(stackTrace); } else jsvAppendPrintf(stackTrace, "system\n"); jsvUnLock(stackTrace); } } } /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; jspeiRemoveScope(); } // Unref old scopes for (i=0;i<execInfo.scopeCount;i++) jsvUnLock(execInfo.scopes[i]); // restore function scopes for (i=0;i<oldScopeCount;i++) execInfo.scopes[i] = oldScopes[i]; execInfo.scopeCount = oldScopeCount; } jsvUnLock(functionCode); jsvUnLock(functionRoot); } jsvUnLock(thisVar); return returnVar; } else if (isParsing) { // ---------------------------------- function, but not executing - just parse args and be done jspeParseFunctionCallBrackets(); /* Do not return function, as it will be unlocked! */ return 0; } else return 0; } // Find a variable (or built-in function) based on the current scopes JsVar *jspGetNamedVariable(const char *tokenName) { JsVar *a = JSP_SHOULD_EXECUTE ? jspeiFindInScopes(tokenName) : 0; if (JSP_SHOULD_EXECUTE && !a) { /* Special case! We haven't found the variable, so check out * and see if it's one of our builtins... */ if (jswIsBuiltInObject(tokenName)) { // Check if we have a built-in function for it // OPT: Could we instead have jswIsBuiltInObjectWithoutConstructor? JsVar *obj = jswFindBuiltInFunction(0, tokenName); // If not, make one if (!obj) obj = jspNewBuiltin(tokenName); if (obj) { // not out of memory a = jsvAddNamedChild(execInfo.root, obj, tokenName); jsvUnLock(obj); } } else { a = jswFindBuiltInFunction(0, tokenName); if (!a) { /* Variable doesn't exist! JavaScript says we should create it * (we won't add it here. 
This is done in the assignment operator)*/ a = jsvMakeIntoVariableName(jsvNewFromString(tokenName), 0); } } } return a; } /// Used by jspGetNamedField / jspGetVarNamedField static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) { // Now look in prototypes JsVar * child = jspeiFindChildFromStringInParents(object, name); /* Check for builtins via separate function * This way we save on RAM for built-ins because everything comes out of program code */ if (!child) { child = jswFindBuiltInFunction(object, name); } /* We didn't get here if we found a child in the object itself, so * if we're here then we probably have the wrong name - so for example * with `a.b = c;` could end up setting `a.prototype.b` (bug #360) * * Also we might have got a built-in, which wouldn't have a name on it * anyway - so in both cases, strip the name if it is there, and create * a new name. */ if (child && returnName) { // Get rid of existing name child = jsvSkipNameAndUnLock(child); // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { child = jspNewPrototype(objName); } } } return child; } /** Get the named function/variable on the object - whether it's built in, or predefined. * If !returnName, returns the function/variable itself or undefined, but * if returnName, return a name (could be fake) referencing the parent. * * NOTE: ArrayBuffer/Strings are not handled here. We assume that if we're * passing a char* rather than a JsVar it's because we're looking up via * a symbol rather than a variable. 
To handle these use jspGetVarNamedField */ JsVar *jspGetNamedField(JsVar *object, const char* name, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromString(object, name, false); if (!child) { child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// see jspGetNamedField - note that nameVar should have had jsvAsArrayIndex called on it first JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromVar(object, nameVar, false); if (!child) { if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) { // for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object); if (child) // turn into an 'array buffer name' child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME; } else if (jsvIsString(object) && jsvIsInt(nameVar)) { JsVarInt idx = jsvGetInteger(nameVar); if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) { char ch = jsvGetCharInString(object, (size_t)idx); child = jsvNewStringOfLength(1, &ch); } else if (returnName) child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled } else { // get the name as a string char name[JSLEX_MAX_TOKEN_LENGTH]; jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH); // try and find it in parents child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// Call the named function on the object - whether it's built in, or predefined. Returns the return value of the function. JsVar *jspCallNamedFunction(JsVar *object, char* name, int argCount, JsVar **argPtr) { JsVar *child = jspGetNamedField(object, name, false); JsVar *r = 0; if (jsvIsFunction(child)) r = jspeFunctionCall(child, 0, object, false, argCount, argPtr); jsvUnLock(child); return r; } NO_INLINE JsVar *jspeFactorMember(JsVar *a, JsVar **parentResult) { /* The parent if we're executing a method call */ JsVar *parent = 0; while (lex->tk=='.' || lex->tk=='[') { if (lex->tk == '.') { // ------------------------------------- Record Access JSP_ASSERT_MATCH('.'); if (jslIsIDOrReservedWord(lex)) { if (JSP_SHOULD_EXECUTE) { // Note: name will go away when we parse something else! 
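          /* jslGetTokenValueAsString points into the lexer's current token,
           * so `name` must be used before jslGetNextToken is called below. */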
const char *name = jslGetTokenValueAsString(lex); JsVar *aVar = jsvSkipName(a); JsVar *child = 0; if (aVar) child = jspGetNamedField(aVar, name, true); if (!child) { if (!jsvIsUndefined(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written JsVar *nameVar = jslGetTokenValueAsVar(lex); child = jsvCreateNewChild(aVar, nameVar, 0); jsvUnLock(nameVar); } else { // could have been a string... jsExceptionHere(JSET_ERROR, "Cannot read property '%s' of undefined", name); } } jsvUnLock(parent); parent = aVar; jsvUnLock(a); a = child; } // skip over current token (we checked above that it was an ID or reserved word) jslGetNextToken(lex); } else { // incorrect token - force a match fail by asking for an ID JSP_MATCH_WITH_RETURN(LEX_ID, a); } } else if (lex->tk == '[') { // ------------------------------------- Array Access JsVar *index; JSP_ASSERT_MATCH('['); if (!jspCheckStackPosition()) return parent; index = jsvSkipNameAndUnLock(jspeAssignmentExpression()); JSP_MATCH_WITH_CLEANUP_AND_RETURN(']', jsvUnLock2(parent, index);, a); if (JSP_SHOULD_EXECUTE) { index = jsvAsArrayIndexAndUnLock(index); JsVar *aVar = jsvSkipName(a); JsVar *child = 0; if (aVar) child = jspGetVarNamedField(aVar, index, true); if (!child) { if (jsvHasChildren(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written child = jsvCreateNewChild(aVar, index, 0); } else { jsExceptionHere(JSET_ERROR, "Field or method %q does not already exist, and can't create it on %t", index, aVar); } } jsvUnLock(parent); parent = jsvLockAgainSafe(aVar); jsvUnLock(a); a = child; jsvUnLock(aVar); } jsvUnLock(index); } else { assert(0); } } if (parentResult) *parentResult = parent; else jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeConstruct(JsVar *func, JsVar *funcName, bool hasArgs) { assert(JSP_SHOULD_EXECUTE); if (!jsvIsFunction(func)) { jsExceptionHere(JSET_ERROR, "Constructor should be a function, but is %t", func); return 0; } JsVar *thisObj = jsvNewObject(); if (!thisObj) return 0; // out of memory // Make sure the function has a 'prototype' var JsVar *prototypeName = jsvFindChildFromString(func, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(func, prototypeName); // make sure it's an object JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(thisObj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName); JsVar *a = jspeFunctionCall(func, funcName, thisObj, hasArgs, 0, 0); /* FIXME: we should ignore return values that aren't objects (bug #848), but then we need * to be aware of `new String()` and `new Uint8Array()`. Ideally we'd let through * arrays/etc, and then String/etc should return 'boxed' values. * * But they don't return boxed values at the moment, so let's just * pass the return value through. If you try and return a string from * a function it's broken JS code anyway. 
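   *
   * eg. `function F() { return {x:1}; }` - `new F()` yields the returned
   * object (correct per the spec), but `return "hi"` would also be passed
   * through here, where standard JS would ignore it and keep `this`.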
*/ if (a) { jsvUnLock(thisObj); thisObj = a; } else { jsvUnLock(a); } return thisObj; } NO_INLINE JsVar *jspeFactorFunctionCall() { /* The parent if we're executing a method call */ bool isConstructor = false; if (lex->tk==LEX_R_NEW) { JSP_ASSERT_MATCH(LEX_R_NEW); isConstructor = true; if (lex->tk==LEX_R_NEW) { jsExceptionHere(JSET_ERROR, "Nesting 'new' operators is unsupported"); jspSetError(false); return 0; } } JsVar *parent = 0; #ifndef SAVE_ON_FLASH bool wasSuper = lex->tk==LEX_R_SUPER; #endif JsVar *a = jspeFactorMember(jspeFactor(), &parent); #ifndef SAVE_ON_FLASH if (wasSuper) { /* if this was 'super.something' then we need * to overwrite the parent, because it'll be * set to the prototype otherwise. */ jsvUnLock(parent); parent = jsvLockAgainSafe(execInfo.thisVar); } #endif while ((lex->tk=='(' || (isConstructor && JSP_SHOULD_EXECUTE)) && !jspIsInterrupted()) { JsVar *funcName = a; JsVar *func = jsvSkipName(funcName); /* The constructor function doesn't change parsing, so if we're * not executing, just short-cut it. */ if (isConstructor && JSP_SHOULD_EXECUTE) { // If we have '(' parse an argument list, otherwise don't look for any args bool parseArgs = lex->tk=='('; a = jspeConstruct(func, funcName, parseArgs); isConstructor = false; // don't treat subsequent brackets as constructors } else a = jspeFunctionCall(func, funcName, parent, true, 0, 0); jsvUnLock3(funcName, func, parent); parent=0; a = jspeFactorMember(a, &parent); } jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeFactorObject() { if (JSP_SHOULD_EXECUTE) { JsVar *contents = jsvNewObject(); if (!contents) { // out of memory jspSetError(false); return 0; } /* JSON-style object definition */ JSP_MATCH_WITH_RETURN('{', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != '}') { JsVar *varName = 0; // we only allow strings or IDs on the left hand side of an initialisation if (jslIsIDOrReservedWord(lex)) { if (JSP_SHOULD_EXECUTE) varName = jslGetTokenValueAsVar(lex); jslGetNextToken(lex); // skip over current token } else if ( lex->tk==LEX_STR || lex->tk==LEX_TEMPLATE_LITERAL || lex->tk==LEX_FLOAT || lex->tk==LEX_INT || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED) { varName = jspeFactor(); } else { JSP_MATCH_WITH_RETURN(LEX_ID, contents); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock(varName), contents); if (JSP_SHOULD_EXECUTE) { varName = jsvAsArrayIndexAndUnLock(varName); JsVar *contentsName = jsvFindChildFromVar(contents, varName, true); if (contentsName) { JsVar *value = jsvSkipNameAndUnLock(jspeAssignmentExpression()); // value can be 0 (could be undefined!) 
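          /* eg. in `{ a : undefined }` the expression above evaluates to 0,
           * which jsvSetValueOfName treats as undefined. */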
jsvUnLock2(jsvSetValueOfName(contentsName, value), value); } } jsvUnLock(varName); // no need to clean here, as it will definitely be used if (lex->tk != '}') JSP_MATCH_WITH_RETURN(',', contents); } JSP_MATCH_WITH_RETURN('}', contents); return contents; } else { // Not executing so do fast skip jspeBlock(); return 0; } } NO_INLINE JsVar *jspeFactorArray() { int idx = 0; JsVar *contents = 0; if (JSP_SHOULD_EXECUTE) { contents = jsvNewEmptyArray(); if (!contents) { // out of memory jspSetError(false); return 0; } } /* JSON-style array */ JSP_MATCH_WITH_RETURN('[', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != ']') { if (JSP_SHOULD_EXECUTE) { JsVar *aVar = 0; JsVar *indexName = 0; if (lex->tk != ',') { // #287 - [,] and [1,2,,4] are allowed aVar = jsvSkipNameAndUnLock(jspeAssignmentExpression()); indexName = jsvMakeIntoVariableName(jsvNewFromInteger(idx), aVar); } if (indexName) { // could be out of memory jsvAddName(contents, indexName); jsvUnLock(indexName); } jsvUnLock(aVar); } else { jsvUnLock(jspeAssignmentExpression()); } // no need to clean here, as it will definitely be used if (lex->tk != ']') JSP_MATCH_WITH_RETURN(',', contents); idx++; } if (contents) jsvSetArrayLength(contents, idx, false); JSP_MATCH_WITH_RETURN(']', contents); return contents; } NO_INLINE void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName) { if (!prototypeName) return; JsVar *prototypeVar = jsvSkipName(prototypeName); if (!jsvIsObject(prototypeVar)) { if (!jsvIsUndefined(prototypeVar)) jsExceptionHere(JSET_TYPEERROR, "Prototype should be an object, got %t", prototypeVar); jsvUnLock(prototypeVar); prototypeVar = jsvNewObject(); // prototype is supposed to be an object JsVar *lastName = jsvSkipToLastName(prototypeName); jsvSetValueOfName(lastName, prototypeVar); jsvUnLock(lastName); } JsVar *constructor = jsvFindChildFromString(prototypeVar, JSPARSE_CONSTRUCTOR_VAR, true); if (constructor) jsvSetValueOfName(constructor, instanceOf); jsvUnLock2(constructor, prototypeVar); } NO_INLINE JsVar *jspeFactorTypeOf() { JSP_ASSERT_MATCH(LEX_R_TYPEOF); JsVar *a = jspeUnaryExpression(); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { if (!jsvIsVariableDefined(a)) { // so we don't get a ReferenceError when accessing an undefined var result=jsvNewFromString("undefined"); } else { a = jsvSkipNameAndUnLock(a); result=jsvNewFromString(jsvGetTypeOf(a)); } } jsvUnLock(a); return result; } NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); if (parent && !jsvIsFunction(parent)) { // else remove properly. 
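        /* eg. `var a=[1,2,3]; delete a[1];` must leave a.length==3, so the
         * length is saved and restored around the removal below. */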
if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; } #ifndef SAVE_ON_FLASH JsVar *jspeTemplateLiteral() { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { JsVar *template = jslGetTokenValueAsVar(lex); a = jsvNewFromEmptyString(); if (a && template) { JsvStringIterator it, dit; jsvStringIteratorNew(&it, template, 0); jsvStringIteratorNew(&dit, a, 0); while (jsvStringIteratorHasChar(&it)) { char ch = jsvStringIteratorGetChar(&it); if (ch=='$') { jsvStringIteratorNext(&it); ch = jsvStringIteratorGetChar(&it); if (ch=='{') { // Now parse out the expression jsvStringIteratorNext(&it); int brackets = 1; JsVar *expr = jsvNewFromEmptyString(); if (!expr) break; JsvStringIterator eit; jsvStringIteratorNew(&eit, expr, 0); while (jsvStringIteratorHasChar(&it)) { ch = jsvStringIteratorGetChar(&it); jsvStringIteratorNext(&it); if (ch=='{') brackets++; if (ch=='}') { brackets--; if (!brackets) break; } jsvStringIteratorAppend(&eit, ch); } jsvStringIteratorFree(&eit); JsVar *result = jspEvaluateExpressionVar(expr); jsvUnLock(expr); result = jsvAsString(result, true); jsvStringIteratorAppendString(&dit, result, 0); jsvUnLock(result); } else { jsvStringIteratorAppend(&dit, '$'); } } else { jsvStringIteratorAppend(&dit, ch); jsvStringIteratorNext(&it); } } jsvStringIteratorFree(&it); jsvStringIteratorFree(&dit); } jsvUnLock(template); } JSP_ASSERT_MATCH(LEX_TEMPLATE_LITERAL); return a; } #endif NO_INLINE JsVar *jspeAddNamedFunctionParameter(JsVar *funcVar, JsVar *name) { if (!funcVar) funcVar = jsvNewWithFlags(JSV_FUNCTION); char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; jsvGetString(name, &buf[1], JSLEX_MAX_TOKEN_LENGTH); JsVar *param = jsvAddNamedChild(funcVar, 0, buf); jsvMakeFunctionParameter(param); jsvUnLock(param); return funcVar; } #ifndef SAVE_ON_FLASH // parse an arrow function NO_INLINE JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a) { assert(!a || jsvIsName(a)); JSP_ASSERT_MATCH(LEX_ARROW_FUNCTION); funcVar = jspeAddNamedFunctionParameter(funcVar, a); bool expressionOnly = lex->tk!='{'; jspeFunctionDefinitionInternal(funcVar, expressionOnly); if (execInfo.thisVar) { jsvObjectSetChild(funcVar, JSPARSE_FUNCTION_THIS_NAME, execInfo.thisVar); } return funcVar; } // parse expressions with commas, maybe followed by an arrow function (bracket already matched) NO_INLINE JsVar *jspeExpressionOrArrowFunction() { JsVar *a = 0; JsVar *funcVar = 0; bool allNames = true; while (lex->tk!=')' && !JSP_SHOULDNT_PARSE) { if (allNames && a) { // we never get here if this isn't a name and a string funcVar = jspeAddNamedFunctionParameter(funcVar, a); } jsvUnLock(a); a = jspeAssignmentExpression(); if (!(jsvIsName(a) && jsvIsString(a))) allNames = false; if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',', jsvUnLock2(a,funcVar), 0); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(a,funcVar), 0); // if arrow is found, create a function if (allNames && lex->tk==LEX_ARROW_FUNCTION) { funcVar = jspeArrowFunction(funcVar, a); jsvUnLock(a); return funcVar; } else { jsvUnLock(funcVar); return a; } } /// Parse an ES6 class, expects LEX_R_CLASS already parsed NO_INLINE JsVar *jspeClassDefinition(bool parseNamedClass) { JsVar *classFunction = 0; JsVar *classPrototype = 0; JsVar *classInternalName = 0; bool actuallyCreateClass = JSP_SHOULD_EXECUTE; if 
(actuallyCreateClass) classFunction = jsvNewWithFlags(JSV_FUNCTION); if (parseNamedClass && lex->tk==LEX_ID) { if (classFunction) classInternalName = jslGetTokenValueAsVar(lex); JSP_ASSERT_MATCH(LEX_ID); } if (classFunction) { JsVar *prototypeName = jsvFindChildFromString(classFunction, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(classFunction, prototypeName); // make sure it's an object classPrototype = jsvSkipName(prototypeName); jsvUnLock(prototypeName); } if (lex->tk==LEX_R_EXTENDS) { JSP_ASSERT_MATCH(LEX_R_EXTENDS); JsVar *extendsFrom = actuallyCreateClass ? jsvSkipNameAndUnLock(jspGetNamedVariable(jslGetTokenValueAsString(lex))) : 0; JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(extendsFrom,classFunction,classInternalName,classPrototype),0); if (classPrototype) { if (jsvIsFunction(extendsFrom)) { jsvObjectSetChild(classPrototype, JSPARSE_INHERITS_VAR, extendsFrom); // link in default constructor if ours isn't supplied jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_CODE_NAME, jsvNewFromString("if(this.__proto__.__proto__)this.__proto__.__proto__.apply(this,arguments)")); } else jsExceptionHere(JSET_SYNTAXERROR, "'extends' argument should be a function, got %t", extendsFrom); } jsvUnLock(extendsFrom); } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{',jsvUnLock3(classFunction,classInternalName,classPrototype),0); while ((lex->tk==LEX_ID || lex->tk==LEX_R_STATIC) && !jspIsInterrupted()) { bool isStatic = lex->tk==LEX_R_STATIC; if (isStatic) JSP_ASSERT_MATCH(LEX_R_STATIC); JsVar *funcName = jslGetTokenValueAsVar(lex); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock3(classFunction,classInternalName,classPrototype),0); JsVar *method = jspeFunctionDefinition(false); if (classFunction && classPrototype) { if (jsvIsStringEqual(funcName, "get") || jsvIsStringEqual(funcName, "set")) { jsExceptionHere(JSET_SYNTAXERROR, "'get' and 'set' and not supported in Espruino"); } else if (jsvIsStringEqual(funcName, "constructor")) { jswrap_function_replaceWith(classFunction, method); } else { funcName = jsvMakeIntoVariableName(funcName, 0); jsvSetValueOfName(funcName, method); jsvAddName(isStatic ? 
classFunction : classPrototype, funcName); } } jsvUnLock2(method,funcName); } jsvUnLock(classPrototype); // If we had a name, add it to the end (or it gets confused with the constructor arguments) if (classInternalName) jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_NAME_NAME, classInternalName); JSP_MATCH_WITH_CLEANUP_AND_RETURN('}',jsvUnLock(classFunction),0); return classFunction; } #endif NO_INLINE JsVar *jspeFactor() { if (lex->tk==LEX_ID) { JsVar *a = jspGetNamedVariable(jslGetTokenValueAsString(lex)); JSP_ASSERT_MATCH(LEX_ID); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_TEMPLATE_LITERAL) jsExceptionHere(JSET_SYNTAXERROR, "Tagged template literals not supported"); else if (lex->tk==LEX_ARROW_FUNCTION && jsvIsName(a)) { JsVar *funcVar = jspeArrowFunction(0,a); jsvUnLock(a); a=funcVar; } #endif return a; } else if (lex->tk==LEX_INT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromLongInteger(stringToInt(jslGetTokenValueAsString(lex))); } JSP_ASSERT_MATCH(LEX_INT); return v; } else if (lex->tk==LEX_FLOAT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromFloat(stringToFloat(jslGetTokenValueAsString(lex))); } JSP_ASSERT_MATCH(LEX_FLOAT); return v; } else if (lex->tk=='(') { JSP_ASSERT_MATCH('('); if (!jspCheckStackPosition()) return 0; #ifdef SAVE_ON_FLASH // Just parse a normal expression (which can include commas) JsVar *a = jspeExpression(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN(')',a); return a; #else return jspeExpressionOrArrowFunction(); #endif } else if (lex->tk==LEX_R_TRUE) { JSP_ASSERT_MATCH(LEX_R_TRUE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(true) : 0; } else if (lex->tk==LEX_R_FALSE) { JSP_ASSERT_MATCH(LEX_R_FALSE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(false) : 0; } else if (lex->tk==LEX_R_NULL) { JSP_ASSERT_MATCH(LEX_R_NULL); return JSP_SHOULD_EXECUTE ? 
jsvNewWithFlags(JSV_NULL) : 0; } else if (lex->tk==LEX_R_UNDEFINED) { JSP_ASSERT_MATCH(LEX_R_UNDEFINED); return 0; } else if (lex->tk==LEX_STR) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) a = jslGetTokenValueAsVar(lex); JSP_ASSERT_MATCH(LEX_STR); return a; #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_TEMPLATE_LITERAL) { return jspeTemplateLiteral(); #endif } else if (lex->tk==LEX_REGEX) { JsVar *a = 0; #ifdef SAVE_ON_FLASH jsExceptionHere(JSET_SYNTAXERROR, "RegEx are not supported in this version of Espruino\n"); #else JsVar *regex = jslGetTokenValueAsVar(lex); size_t regexEnd = 0, regexLen = 0; JsvStringIterator it; jsvStringIteratorNew(&it, regex, 0); while (jsvStringIteratorHasChar(&it)) { regexLen++; if (jsvStringIteratorGetChar(&it)=='/') regexEnd = regexLen; jsvStringIteratorNext(&it); } jsvStringIteratorFree(&it); JsVar *flags = 0; if (regexEnd < regexLen) flags = jsvNewFromStringVar(regex, regexEnd, JSVAPPENDSTRINGVAR_MAXLENGTH); JsVar *regexSource = jsvNewFromStringVar(regex, 1, regexEnd-2); a = jswrap_regexp_constructor(regexSource, flags); jsvUnLock3(regex, flags, regexSource); #endif JSP_ASSERT_MATCH(LEX_REGEX); return a; } else if (lex->tk=='{') { if (!jspCheckStackPosition()) return 0; return jspeFactorObject(); } else if (lex->tk=='[') { if (!jspCheckStackPosition()) return 0; return jspeFactorArray(); } else if (lex->tk==LEX_R_FUNCTION) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_FUNCTION); return jspeFunctionDefinition(true); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_CLASS); return jspeClassDefinition(true); } else if (lex->tk==LEX_R_SUPER) { JSP_ASSERT_MATCH(LEX_R_SUPER); /* This is kind of nasty, since super appears to do three different things. * In the constructor it references the extended class's constructor * in a method it references the constructor's prototype. * in a static method it references the extended class's constructor (but this is different) */ if (jsvIsObject(execInfo.thisVar)) { // 'this' is an object - must be calling a normal method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_INHERITS_VAR, 0); // if we're in a method, get __proto__ first JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; // still in method, get __proto__.__proto__ jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } if (lex->tk=='(') return proto2; // eg. used in a constructor // But if we're doing something else - eg '.' or '[' then it needs to reference the prototype JsVar *proto3 = jsvIsFunction(proto2) ? jsvObjectGetChild(proto2, JSPARSE_PROTOTYPE_VAR, 0) : 0; jsvUnLock(proto2); return proto3; } else if (jsvIsFunction(execInfo.thisVar)) { // 'this' is a function - must be calling a static method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_PROTOTYPE_VAR, 0); JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } return proto2; } jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; #endif } else if (lex->tk==LEX_R_THIS) { JSP_ASSERT_MATCH(LEX_R_THIS); return jsvLockAgain( execInfo.thisVar ? 
execInfo.thisVar : execInfo.root ); } else if (lex->tk==LEX_R_DELETE) { if (!jspCheckStackPosition()) return 0; return jspeFactorDelete(); } else if (lex->tk==LEX_R_TYPEOF) { if (!jspCheckStackPosition()) return 0; return jspeFactorTypeOf(); } else if (lex->tk==LEX_R_VOID) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_VOID); jsvUnLock(jspeUnaryExpression()); return 0; } JSP_MATCH(LEX_EOF); jsExceptionHere(JSET_SYNTAXERROR, "Unexpected end of Input\n"); return 0; } NO_INLINE JsVar *__jspePostfixExpression(JsVar *a) { while (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *oldValue = jsvAsNumberAndUnLock(jsvSkipName(a)); // keep the old value (but convert to number) JsVar *res = jsvMathsOpSkipNames(oldValue, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jspReplaceWith(a, res); jsvUnLock(res); // but then use the old value jsvUnLock(a); a = oldValue; } } return a; } NO_INLINE JsVar *jspePostfixExpression() { JsVar *a; // TODO: should be in jspeUnaryExpression if (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); a = jspePostfixExpression(); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *res = jsvMathsOpSkipNames(a, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jspReplaceWith(a, res); jsvUnLock(res); } } else a = jspeFactorFunctionCall(); return __jspePostfixExpression(a); } NO_INLINE JsVar *jspeUnaryExpression() { if (lex->tk=='!' || lex->tk=='~' || lex->tk=='-' || lex->tk=='+') { short tk = lex->tk; JSP_ASSERT_MATCH(tk); if (!JSP_SHOULD_EXECUTE) { return jspeUnaryExpression(); } if (tk=='!') { // logical not return jsvNewFromBool(!jsvGetBoolAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='~') { // bitwise not return jsvNewFromInteger(~jsvGetIntegerAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='-') { // unary minus return jsvNegateAndUnLock(jspeUnaryExpression()); // names already skipped } else if (tk=='+') { // unary plus (convert to number) JsVar *v = jsvSkipNameAndUnLock(jspeUnaryExpression()); JsVar *r = jsvAsNumber(v); // names already skipped jsvUnLock(v); return r; } assert(0); return 0; } else return jspePostfixExpression(); } // Get the precedence of a BinaryExpression - or return 0 if not one unsigned int jspeGetBinaryExpressionPrecedence(int op) { switch (op) { case LEX_OROR: return 1; break; case LEX_ANDAND: return 2; break; case '|' : return 3; break; case '^' : return 4; break; case '&' : return 5; break; case LEX_EQUAL: case LEX_NEQUAL: case LEX_TYPEEQUAL: case LEX_NTYPEEQUAL: return 6; case LEX_LEQUAL: case LEX_GEQUAL: case '<': case '>': case LEX_R_INSTANCEOF: return 7; case LEX_R_IN: return (execInfo.execute&EXEC_FOR_INIT)?0:7; case LEX_LSHIFT: case LEX_RSHIFT: case LEX_RSHIFTUNSIGNED: return 8; case '+': case '-': return 9; case '*': case '/': case '%': return 10; default: return 0; } } NO_INLINE JsVar *__jspeBinaryExpression(JsVar *a, unsigned int lastPrecedence) { /* This one's a bit strange. Basically all the ops have their own precedence, it's not * like & and | share the same precedence. We don't want to recurse for each one, * so instead we do this. * * We deal with an expression in recursion ONLY if it's of higher precedence * than the current one, otherwise we stick in the while loop. 
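   *
   * eg. parsing `1+2*3`: '+' has precedence 9 and '*' has 10, so after `2`
   * the higher-precedence '*' recurses to build `2*3`, then control drops
   * back into the while loop to finish the '+'.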
*/ unsigned int precedence = jspeGetBinaryExpressionPrecedence(lex->tk); while (precedence && precedence>lastPrecedence) { int op = lex->tk; JSP_ASSERT_MATCH(op); // if we have short-circuit ops, then if we know the outcome // we don't bother to execute the other op. Even if not // we need to tell mathsOp it's an & or | if (op==LEX_ANDAND || op==LEX_OROR) { bool aValue = jsvGetBoolAndUnLock(jsvSkipName(a)); if ((!aValue && op==LEX_ANDAND) || (aValue && op==LEX_OROR)) { // use first argument (A) JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(__jspeBinaryExpression(jspeUnaryExpression(),precedence)); JSP_RESTORE_EXECUTE(); } else { // use second argument (B) jsvUnLock(a); a = __jspeBinaryExpression(jspeUnaryExpression(),precedence); } } else { // else it's a more 'normal' logical expression - just use Maths JsVar *b = __jspeBinaryExpression(jspeUnaryExpression(),precedence); if (JSP_SHOULD_EXECUTE) { if (op==LEX_R_IN) { JsVar *av = jsvSkipName(a); // needle JsVar *bv = jsvSkipName(b); // haystack if (jsvIsArray(bv) || jsvIsObject(bv)) { // search keys, NOT values av = jsvAsArrayIndexAndUnLock(av); JsVar *varFound = jspGetVarNamedField( bv, av, true); jsvUnLock(a); a = jsvNewFromBool(varFound!=0); jsvUnLock(varFound); } else {// else it will be undefined jsExceptionHere(JSET_ERROR, "Cannot use 'in' operator to search a %t", bv); jsvUnLock(a); a = 0; } jsvUnLock2(av, bv); } else if (op==LEX_R_INSTANCEOF) { bool inst = false; JsVar *av = jsvSkipName(a); JsVar *bv = jsvSkipName(b); if (!jsvIsFunction(bv)) { jsExceptionHere(JSET_ERROR, "Expecting a function on RHS in instanceof check, got %t", bv); } else { if (jsvIsObject(av) || jsvIsFunction(av)) { JsVar *bproto = jspGetNamedField(bv, JSPARSE_PROTOTYPE_VAR, false); JsVar *proto = jsvObjectGetChild(av, JSPARSE_INHERITS_VAR, 0); while (proto) { if (proto == bproto) inst=true; // search prototype chain JsVar *childProto = jsvObjectGetChild(proto, JSPARSE_INHERITS_VAR, 0); jsvUnLock(proto); proto = childProto; } if (jspIsConstructor(bv, "Object")) inst = true; jsvUnLock(bproto); } if (!inst) { const char *name = jswGetBasicObjectName(av); if (name) { inst = jspIsConstructor(bv, name); } // Hack for built-ins that should also be instances of Object if (!inst && (jsvIsArray(av) || jsvIsArrayBuffer(av)) && jspIsConstructor(bv, "Object")) inst = true; } } jsvUnLock3(av, bv, a); a = jsvNewFromBool(inst); } else { // --------------------------------------------- NORMAL JsVar *res = jsvMathsOpSkipNames(a, b, op); jsvUnLock(a); a = res; } } jsvUnLock(b); } precedence = jspeGetBinaryExpressionPrecedence(lex->tk); } return a; } JsVar *jspeBinaryExpression() { return __jspeBinaryExpression(jspeUnaryExpression(),0); } NO_INLINE JsVar *__jspeConditionalExpression(JsVar *lhs) { if (lex->tk=='?') { JSP_ASSERT_MATCH('?'); if (!JSP_SHOULD_EXECUTE) { // just let lhs pass through jsvUnLock(jspeAssignmentExpression()); JSP_MATCH(':'); jsvUnLock(jspeAssignmentExpression()); } else { bool first = jsvGetBoolAndUnLock(jsvSkipName(lhs)); jsvUnLock(lhs); if (first) { lhs = jspeAssignmentExpression(); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); } else { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); JSP_MATCH(':'); lhs = jspeAssignmentExpression(); } } } return lhs; } JsVar *jspeConditionalExpression() { return __jspeConditionalExpression(jspeBinaryExpression()); } NO_INLINE JsVar *__jspeAssignmentExpression(JsVar *lhs) { if (lex->tk=='=' || 
lex->tk==LEX_PLUSEQUAL || lex->tk==LEX_MINUSEQUAL || lex->tk==LEX_MULEQUAL || lex->tk==LEX_DIVEQUAL || lex->tk==LEX_MODEQUAL || lex->tk==LEX_ANDEQUAL || lex->tk==LEX_OREQUAL || lex->tk==LEX_XOREQUAL || lex->tk==LEX_RSHIFTEQUAL || lex->tk==LEX_LSHIFTEQUAL || lex->tk==LEX_RSHIFTUNSIGNEDEQUAL) { JsVar *rhs; int op = lex->tk; JSP_ASSERT_MATCH(op); rhs = jspeAssignmentExpression(); rhs = jsvSkipNameAndUnLock(rhs); // ensure we get rid of any references on the RHS if (JSP_SHOULD_EXECUTE && lhs) { if (op=='=') { /* If we're assigning to this and we don't have a parent, * add it to the symbol table root */ if (!jsvGetRefs(lhs) && jsvIsName(lhs)) { if (!jsvIsArrayBufferName(lhs) && !jsvIsNewChild(lhs)) jsvAddName(execInfo.root, lhs); } jspReplaceWith(lhs, rhs); } else { if (op==LEX_PLUSEQUAL) op='+'; else if (op==LEX_MINUSEQUAL) op='-'; else if (op==LEX_MULEQUAL) op='*'; else if (op==LEX_DIVEQUAL) op='/'; else if (op==LEX_MODEQUAL) op='%'; else if (op==LEX_ANDEQUAL) op='&'; else if (op==LEX_OREQUAL) op='|'; else if (op==LEX_XOREQUAL) op='^'; else if (op==LEX_RSHIFTEQUAL) op=LEX_RSHIFT; else if (op==LEX_LSHIFTEQUAL) op=LEX_LSHIFT; else if (op==LEX_RSHIFTUNSIGNEDEQUAL) op=LEX_RSHIFTUNSIGNED; if (op=='+' && jsvIsName(lhs)) { JsVar *currentValue = jsvSkipName(lhs); if (jsvIsString(currentValue) && !jsvIsFlatString(currentValue) && jsvGetRefs(currentValue)==1 && rhs!=currentValue) { /* A special case for string += where this is the only use of the string * and we're not appending to ourselves. In this case we can do a * simple append (rather than clone + append)*/ JsVar *str = jsvAsString(rhs, false); jsvAppendStringVarComplete(currentValue, str); jsvUnLock(str); op = 0; } jsvUnLock(currentValue); } if (op) { /* Fallback which does a proper add */ JsVar *res = jsvMathsOpSkipNames(lhs,rhs,op); jspReplaceWith(lhs, res); jsvUnLock(res); } } } jsvUnLock(rhs); } return lhs; } JsVar *jspeAssignmentExpression() { return __jspeAssignmentExpression(jspeConditionalExpression()); } // ',' is allowed to add multiple expressions, this is not allowed in jspeAssignmentExpression NO_INLINE JsVar *jspeExpression() { while (!JSP_SHOULDNT_PARSE) { JsVar *a = jspeAssignmentExpression(); if (lex->tk!=',') return a; // if we get a comma, we just forget this data and parse the next bit... jsvUnLock(a); JSP_ASSERT_MATCH(','); } return 0; } /** Parse a block `{ ... }` but assume brackets are already parsed */ NO_INLINE void jspeBlockNoBrackets() { if (JSP_SHOULD_EXECUTE) { while (lex->tk && lex->tk!='}') { jsvUnLock(jspeStatement()); if (JSP_HAS_ERROR) { if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) { execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED); JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, "at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); } } } if (JSP_SHOULDNT_PARSE) return; } } else { // fast skip of blocks int brackets = 0; while (lex->tk && (brackets || lex->tk != '}')) { if (lex->tk == '{') brackets++; if (lex->tk == '}') brackets--; JSP_ASSERT_MATCH(lex->tk); } } return; } /** Parse a block `{ ... 
}` */ NO_INLINE void jspeBlock() { JSP_MATCH_WITH_RETURN('{',); jspeBlockNoBrackets(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN('}',); return; } NO_INLINE JsVar *jspeBlockOrStatement() { if (lex->tk=='{') { jspeBlock(); return 0; } else { JsVar *v = jspeStatement(); if (lex->tk==';') JSP_ASSERT_MATCH(';'); return v; } } /** Parse using current lexer until we hit the end of * input or there was some problem. */ NO_INLINE JsVar *jspParse() { JsVar *v = 0; while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) { jsvUnLock(v); v = jspeBlockOrStatement(); } return v; } NO_INLINE JsVar *jspeStatementVar() { JsVar *lastDefined = 0; /* variable creation. TODO - we need a better way of parsing the left * hand side. Maybe just have a flag called can_create_var that we * set and then we parse as if we're doing a normal equals.*/ assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST); jslGetNextToken(); ///TODO: Correctly implement CONST and LET - we just treat them like 'var' at the moment bool hasComma = true; // for first time in loop while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { a = jspeiFindOnTop(jslGetTokenValueAsString(lex), true); if (!a) { // out of memory jspSetError(false); return lastDefined; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined); // sort out initialiser if (lex->tk == '=') { JsVar *var; JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined); var = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (JSP_SHOULD_EXECUTE) jspReplaceWith(a, var); jsvUnLock(var); } jsvUnLock(lastDefined); lastDefined = a; hasComma = lex->tk == ','; if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined); } return lastDefined; } NO_INLINE JsVar *jspeStatementIf() { bool cond; JsVar *var, *result = 0; JSP_ASSERT_MATCH(LEX_R_IF); JSP_MATCH('('); var = jspeExpression(); if (JSP_SHOULDNT_PARSE) return var; JSP_MATCH(')'); cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var)); jsvUnLock(var); JSP_SAVE_EXECUTE(); if (!cond) jspSetNoExecute(); JsVar *a = jspeBlockOrStatement(); if (!cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); } else { result = a; } if (lex->tk==LEX_R_ELSE) { JSP_ASSERT_MATCH(LEX_R_ELSE); JSP_SAVE_EXECUTE(); if (cond) jspSetNoExecute(); JsVar *a = jspeBlockOrStatement(); if (cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); } else { result = a; } } return result; } NO_INLINE JsVar *jspeStatementSwitch() { JSP_ASSERT_MATCH(LEX_R_SWITCH); JSP_MATCH('('); JsVar *switchOn = jspeExpression(); JSP_SAVE_EXECUTE(); bool execute = JSP_SHOULD_EXECUTE; JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0); // shortcut if not executing... 
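/* (Editorial expansion, inferred from the flag handling below: while the
 * case list is scanned, execInfo.execute stays at EXEC_NO|EXEC_IN_SWITCH
 * until some `case` expression compares === equal to switchOn; it is then
 * flipped to EXEC_YES so that following cases fall through, and a `break`
 * inside a case leaves EXEC_BREAK set, which the code after the loop
 * converts back to EXEC_YES once the whole switch has been parsed.) */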
if (!execute) { jsvUnLock(switchOn); jspeBlock(); return 0; } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0); bool executeDefault = true; if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH; while (lex->tk==LEX_R_CASE) { JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0); JsExecFlags oldFlags = execInfo.execute; if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; JsVar *test = jspeAssignmentExpression(); execInfo.execute = oldFlags|EXEC_IN_SWITCH; JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0); bool cond = false; if (execute) cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL)); if (cond) executeDefault = false; jsvUnLock(test); if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}') jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns } jsvUnLock(switchOn); if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) { execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; } else { executeDefault = true; } JSP_RESTORE_EXECUTE(); if (lex->tk==LEX_R_DEFAULT) { JSP_ASSERT_MATCH(LEX_R_DEFAULT); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); if (!executeDefault) jspSetNoExecute(); else execInfo.execute |= EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}') jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK; JSP_RESTORE_EXECUTE(); } JSP_MATCH('}'); return 0; } NO_INLINE JsVar *jspeStatementDoOrWhile(bool isWhile) { #ifdef JSPARSE_MAX_LOOP_ITERATIONS int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; #endif JsVar *cond; bool loopCond = true; // true for do...while loops bool hasHadBreak = false; JslCharPos whileCondStart; // We do repetition by pulling out the string representing our statement // there's definitely some opportunity for optimisation here JSP_ASSERT_MATCH(isWhile ?
LEX_R_WHILE : LEX_R_DO); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; if (isWhile) { // while loop JSP_MATCH('('); whileCondStart = jslCharPosClone(&lex->tokenStart); cond = jspeAssignmentExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileCondStart);,0); } JslCharPos whileBodyStart = jslCharPosClone(&lex->tokenStart); JSP_SAVE_EXECUTE(); // actually try and execute first bit of while loop (we'll do the rest in the actual loop later) if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = EXEC_YES; hasHadBreak = true; // fail loop condition, so we exit } if (!loopCond) JSP_RESTORE_EXECUTE(); if (!isWhile) { // do..while loop - whileCondStart isn't valid yet here, so only whileBodyStart needs freeing on error JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_WHILE,jslCharPosFree(&whileBodyStart);,0); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileBodyStart);,0); whileCondStart = jslCharPosClone(&lex->tokenStart); cond = jspeAssignmentExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } JslCharPos whileBodyEnd; whileBodyEnd = jslCharPosClone(&lex->tokenStart); while (!hasHadBreak && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount-->0 #endif ) { jslSeekToP(&whileCondStart); cond = jspeAssignmentExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); if (loopCond) { jslSeekToP(&whileBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = EXEC_YES; hasHadBreak = true; } } } jslSeekToP(&whileBodyEnd); jslCharPosFree(&whileCondStart); jslCharPosFree(&whileBodyStart); jslCharPosFree(&whileBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount<=0) { jsExceptionHere(JSET_ERROR, "WHILE Loop exceeded the maximum number of iterations (" STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS) ")"); } #endif return 0; } NO_INLINE JsVar *jspeStatementFor() { JSP_ASSERT_MATCH(LEX_R_FOR); JSP_MATCH('('); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; execInfo.execute |= EXEC_FOR_INIT; // initialisation JsVar *forStatement = 0; // we could have 'for (;;)' - so don't munch up our semicolon if that's all we have if (lex->tk != ';') forStatement = jspeStatement(); if (jspIsInterrupted()) { jsvUnLock(forStatement); return 0; } execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT; if (lex->tk == LEX_R_IN) { // for (i in array) // where 'i' is the forStatement parsed above if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) { jsExceptionHere(JSET_ERROR, "FOR a IN b - 'a' must be a variable name, not %t", forStatement); jsvUnLock(forStatement); // unlock only after reporting, so %t still sees a live var return 0; } bool addedIteratorToScope = false; if (JSP_SHOULD_EXECUTE && !jsvGetRefs(forStatement)) { // if the variable did not exist, add it to the scope addedIteratorToScope = true; jsvAddName(execInfo.root, forStatement); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_IN, jsvUnLock(forStatement), 0); JsVar *array =
jsvSkipNameAndUnLock(jspeExpression()); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array), 0); JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart); JSP_SAVE_EXECUTE(); jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; JSP_RESTORE_EXECUTE(); if (JSP_SHOULD_EXECUTE) { if (jsvIsIterable(array)) { JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array); JsVar *foundPrototype = 0; JsvIterator it; jsvIteratorNew(&it, array, JSIF_DEFINED_ARRAY_ElEMENTS); bool hasHadBreak = false; while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) { JsVar *loopIndexVar = jsvIteratorGetKey(&it); bool ignore = false; if (checkerFunction && checkerFunction(loopIndexVar)) { ignore = true; if (jsvIsString(loopIndexVar) && jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR)) foundPrototype = jsvSkipName(loopIndexVar); } if (!ignore) { JsVar *indexValue = jsvIsName(loopIndexVar) ? jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) : loopIndexVar; if (indexValue) { // could be out of memory assert(!jsvIsName(indexValue) && jsvGetRefs(indexValue)==0); jsvSetValueOfName(forStatement, indexValue); if (indexValue!=loopIndexVar) jsvUnLock(indexValue); jsvIteratorNext(&it); jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = EXEC_YES; hasHadBreak = true; } } } else jsvIteratorNext(&it); jsvUnLock(loopIndexVar); if (!jsvIteratorHasElement(&it) && foundPrototype) { jsvIteratorFree(&it); jsvIteratorNew(&it, foundPrototype, JSIF_DEFINED_ARRAY_ElEMENTS); jsvUnLock(foundPrototype); foundPrototype = 0; } } assert(!foundPrototype); jsvIteratorFree(&it); } else if (!jsvIsUndefined(array)) { jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); if (addedIteratorToScope) { jsvRemoveChild(execInfo.root, forStatement); } jsvUnLock2(forStatement, array); } else { // ----------------------------------------------- NORMAL FOR LOOP #ifdef JSPARSE_MAX_LOOP_ITERATIONS int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; #endif bool loopCond = true; bool hasHadBreak = false; jsvUnLock(forStatement); JSP_MATCH(';'); JslCharPos forCondStart = jslCharPosClone(&lex->tokenStart); if (lex->tk != ';') { JsVar *cond = jspeAssignmentExpression(); // condition loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0); JslCharPos forIterStart = jslCharPosClone(&lex->tokenStart); if (lex->tk != ')') { // we could have 'for (;;)' JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeExpression()); // iterator JSP_RESTORE_EXECUTE(); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0); JslCharPos forBodyStart = jslCharPosClone(&lex->tokenStart); // actual for body JSP_SAVE_EXECUTE(); if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd = jslCharPosClone(&lex->tokenStart); if (!wasInLoop) 
execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (loopCond || !JSP_SHOULD_EXECUTE) { if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = EXEC_YES; hasHadBreak = true; } } if (!loopCond) JSP_RESTORE_EXECUTE(); if (loopCond) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount-->0 #endif ) { jslSeekToP(&forCondStart); ; if (lex->tk == ';') { loopCond = true; } else { JsVar *cond = jspeAssignmentExpression(); loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (JSP_SHOULD_EXECUTE && loopCond) { jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = EXEC_YES; hasHadBreak = true; } } if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forCondStart); jslCharPosFree(&forIterStart); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount<=0) { jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")"); } #endif } return 0; } NO_INLINE JsVar *jspeStatementTry() { // execute the try block JSP_ASSERT_MATCH(LEX_R_TRY); bool shouldExecuteBefore = JSP_SHOULD_EXECUTE; jspeBlock(); bool hadException = shouldExecuteBefore && ((execInfo.execute & EXEC_EXCEPTION)!=0); bool hadCatch = false; if (lex->tk == LEX_R_CATCH) { JSP_ASSERT_MATCH(LEX_R_CATCH); hadCatch = true; JSP_MATCH('('); JsVar *scope = 0; JsVar *exceptionVar = 0; if (hadException) { scope = jsvNewObject(); if (scope) exceptionVar = jsvFindChildFromString(scope, jslGetTokenValueAsString(lex), true); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock2(scope,exceptionVar),0); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jsvUnLock2(scope,exceptionVar),0); if (exceptionVar) { // set the exception var up properly JsVar *exception = jspGetException(); if (exception) { jsvSetValueOfName(exceptionVar, exception); jsvUnLock(exception); } // Now clear the exception flag (it's handled - we hope!) execInfo.execute = execInfo.execute & (JsExecFlags)~(EXEC_EXCEPTION|EXEC_ERROR_LINE_REPORTED); jsvUnLock(exceptionVar); } if (shouldExecuteBefore && !hadException) { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jspeBlock(); JSP_RESTORE_EXECUTE(); } else if (scope) { if (jspeiAddScope(scope)) { jspeBlock(); jspeiRemoveScope(); } } jsvUnLock(scope); } if (lex->tk == LEX_R_FINALLY || (!hadCatch && ((execInfo.execute&(EXEC_ERROR|EXEC_INTERRUPTED))==0))) { JSP_MATCH(LEX_R_FINALLY); // clear the exception flag - but only momentarily! if (hadException) execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_EXCEPTION; jspeBlock(); // put the flag back! 
if (hadException && !hadCatch) execInfo.execute = execInfo.execute | EXEC_EXCEPTION; } return 0; } NO_INLINE JsVar *jspeStatementReturn() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_RETURN); if (lex->tk != ';' && lex->tk != '}') { // we only want the value, so skip the name if there was one result = jsvSkipNameAndUnLock(jspeExpression()); } if (JSP_SHOULD_EXECUTE) { JsVar *resultVar = jspeiFindInScopes(JSPARSE_RETURN_VAR); if (resultVar) { jspReplaceWith(resultVar, result); jsvUnLock(resultVar); execInfo.execute |= EXEC_RETURN; // Stop anything else in this function executing } else { jsExceptionHere(JSET_SYNTAXERROR, "RETURN statement, but not in a function.\n"); } } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementThrow() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_THROW); result = jsvSkipNameAndUnLock(jspeExpression()); if (JSP_SHOULD_EXECUTE) { jspSetException(result); // Stop anything else in this function executing } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementFunctionDecl(bool isClass) { JsVar *funcName = 0; JsVar *funcVar; #ifndef SAVE_ON_FLASH JSP_ASSERT_MATCH(isClass ? LEX_R_CLASS : LEX_R_FUNCTION); #else JSP_ASSERT_MATCH(LEX_R_FUNCTION); #endif bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) { funcName = jsvMakeIntoVariableName(jslGetTokenValueAsVar(lex), 0); if (!funcName) { // out of memory return 0; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(funcName), 0); #ifndef SAVE_ON_FLASH funcVar = isClass ? jspeClassDefinition(false) : jspeFunctionDefinition(false); #else funcVar = jspeFunctionDefinition(false); #endif if (actuallyCreateFunction) { // find a function with the same name (or make one) // OPT: can Find* use just a JsVar that is a 'name'? JsVar *existingName = jspeiFindNameOnTop(funcName, true); JsVar *existingFunc = jsvSkipName(existingName); if (jsvIsFunction(existingFunc)) { // 'proper' replace, that keeps the original function var and swaps the children funcVar = jsvSkipNameAndUnLock(funcVar); jswrap_function_replaceWith(existingFunc, funcVar); } else { jspReplaceWith(existingName, funcVar); } jsvUnLock(funcName); funcName = existingName; jsvUnLock(existingFunc); // existingName is used - don't UnLock } jsvUnLock(funcVar); return funcName; } NO_INLINE JsVar *jspeStatement() { #ifdef USE_DEBUGGER if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && lex->tk!=';' && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = jsvStringIteratorGetIndex(&lex->tokenStart.it)-1; jsiDebuggerLoop(); } #endif if (lex->tk==LEX_ID || lex->tk==LEX_INT || lex->tk==LEX_FLOAT || lex->tk==LEX_STR || lex->tk==LEX_TEMPLATE_LITERAL || lex->tk==LEX_REGEX || lex->tk==LEX_R_NEW || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_THIS || lex->tk==LEX_R_DELETE || lex->tk==LEX_R_TYPEOF || lex->tk==LEX_R_VOID || lex->tk==LEX_R_SUPER || lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS || lex->tk=='!' || lex->tk=='-' || lex->tk=='+' || lex->tk=='~' || lex->tk=='[' || lex->tk=='(') { /* Execute a simple statement that only contains basic arithmetic... 
*/ return jspeExpression(); } else if (lex->tk=='{') { /* A block of code */ jspeBlock(); return 0; } else if (lex->tk==';') { /* Empty statement - to allow things like ;;; */ JSP_ASSERT_MATCH(';'); return 0; } else if (lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST) { return jspeStatementVar(); } else if (lex->tk==LEX_R_IF) { return jspeStatementIf(); } else if (lex->tk==LEX_R_DO) { return jspeStatementDoOrWhile(false); } else if (lex->tk==LEX_R_WHILE) { return jspeStatementDoOrWhile(true); } else if (lex->tk==LEX_R_FOR) { return jspeStatementFor(); } else if (lex->tk==LEX_R_TRY) { return jspeStatementTry(); } else if (lex->tk==LEX_R_RETURN) { return jspeStatementReturn(); } else if (lex->tk==LEX_R_THROW) { return jspeStatementThrow(); } else if (lex->tk==LEX_R_FUNCTION) { return jspeStatementFunctionDecl(false/* function */); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { return jspeStatementFunctionDecl(true/* class */); #endif } else if (lex->tk==LEX_R_CONTINUE) { JSP_ASSERT_MATCH(LEX_R_CONTINUE); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & EXEC_IN_LOOP)) jsExceptionHere(JSET_SYNTAXERROR, "CONTINUE statement outside of FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_CONTINUE; } } else if (lex->tk==LEX_R_BREAK) { JSP_ASSERT_MATCH(LEX_R_BREAK); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & (EXEC_IN_LOOP|EXEC_IN_SWITCH))) jsExceptionHere(JSET_SYNTAXERROR, "BREAK statement outside of SWITCH, FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_BREAK; } } else if (lex->tk==LEX_R_SWITCH) { return jspeStatementSwitch(); } else if (lex->tk==LEX_R_DEBUGGER) { JSP_ASSERT_MATCH(LEX_R_DEBUGGER); #ifdef USE_DEBUGGER if (JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } else JSP_MATCH(LEX_EOF); return 0; } // ----------------------------------------------------------------------------- /// Create a new built-in object that jswrapper can use to check for built-in functions JsVar *jspNewBuiltin(const char *instanceOf) { JsVar *objFunc = jswFindBuiltInFunction(0, instanceOf); if (!objFunc) return 0; // out of memory return objFunc; } /// Create a new Class of the given instance and return its prototype NO_INLINE JsVar *jspNewPrototype(const char *instanceOf) { JsVar *objFuncName = jsvFindChildFromString(execInfo.root, instanceOf, true); if (!objFuncName) // out of memory return 0; JsVar *objFunc = jsvSkipName(objFuncName); if (!objFunc) { objFunc = jspNewBuiltin(instanceOf); if (!objFunc) { // out of memory jsvUnLock(objFuncName); return 0; } // set up name jsvSetValueOfName(objFuncName, objFunc); } JsVar *prototypeName = jsvFindChildFromString(objFunc, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(objFunc, prototypeName); // make sure it's an object jsvUnLock2(objFunc, objFuncName); return prototypeName; } /** Create a new object of the given instance and add it to root with name 'name'. 
* If name!=0, added to root with name, and the name is returned * If name==0, not added to root and Object itself returned */ NO_INLINE JsVar *jspNewObject(const char *name, const char *instanceOf) { JsVar *prototypeName = jspNewPrototype(instanceOf); JsVar *obj = jsvNewObject(); if (!obj) { // out of memory jsvUnLock(prototypeName); return 0; } if (name) { // If it's a device, set the device number up as the Object data // See jsiGetDeviceFromClass IOEventFlags device = jshFromDeviceString(name); if (device!=EV_NONE) { obj->varData.str[0] = 'D'; obj->varData.str[1] = 'E'; obj->varData.str[2] = 'V'; obj->varData.str[3] = (char)device; } } // add __proto__ JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(obj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName); prototypeName = 0; if (name) { JsVar *objName = jsvFindChildFromString(execInfo.root, name, true); if (objName) jsvSetValueOfName(objName, obj); jsvUnLock(obj); if (!objName) { // out of memory return 0; } return objName; } else return obj; } /** Returns true if the constructor function given is the same as that * of the object with the given name. */ bool jspIsConstructor(JsVar *constructor, const char *constructorName) { JsVar *objFunc = jsvObjectGetChild(execInfo.root, constructorName, 0); if (!objFunc) return false; bool isConstructor = objFunc == constructor; jsvUnLock(objFunc); return isConstructor; } /** Get the constructor of the given object, or return 0 if not found or not a function */ JsVar *jspGetConstructor(JsVar *object) { if (!jsvIsObject(object)) return 0; JsVar *proto = jsvObjectGetChild(object, JSPARSE_INHERITS_VAR, 0); if (jsvIsObject(proto)) { JsVar *constr = jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0); if (jsvIsFunction(constr)) { jsvUnLock(proto); return constr; } jsvUnLock(constr); } jsvUnLock(proto); return 0; } // ----------------------------------------------------------------------------- void jspSoftInit() { execInfo.root = jsvFindOrCreateRoot(); // Root now has a lock and a ref execInfo.hiddenRoot = jsvObjectGetChild(execInfo.root, JS_HIDDEN_CHAR_STR, JSV_OBJECT); execInfo.execute = EXEC_YES; } void jspSoftKill() { jsvUnLock(execInfo.hiddenRoot); execInfo.hiddenRoot = 0; jsvUnLock(execInfo.root); execInfo.root = 0; // Root is now left with just a ref } void jspInit() { jspSoftInit(); } void jspKill() { jspSoftKill(); // Unreffing this should completely kill everything attached to root JsVar *r = jsvFindOrCreateRoot(); jsvUnRef(r); jsvUnLock(r); } /** Evaluate the given variable as an expression (in current scope) */ JsVar *jspEvaluateExpressionVar(JsVar *str) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); lex.lineNumberOffset = oldLex->lineNumberOffset; // actually do the parsing JsVar *v = jspeExpression(); jslKill(); jslSetLex(oldLex); return jsvSkipNameAndUnLock(v); } /** Execute code from a variable and return the result.
If lineNumberOffset * is nonzero it's added to the line numbers that get reported for errors/debug */ JsVar *jspEvaluateVar(JsVar *str, JsVar *scope, uint16_t lineNumberOffset) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); lex.lineNumberOffset = lineNumberOffset; JsExecInfo oldExecInfo = execInfo; execInfo.execute = EXEC_YES; bool scopeAdded = false; if (scope) { // if we're adding a scope, make sure it's the *only* scope execInfo.scopeCount = 0; scopeAdded = jspeiAddScope(scope); } // actually do the parsing JsVar *v = jspParse(); // clean up if (scopeAdded) jspeiRemoveScope(); jslKill(); jslSetLex(oldLex); // restore state and execInfo JsExecFlags mask = EXEC_FOR_INIT|EXEC_IN_LOOP|EXEC_IN_SWITCH; oldExecInfo.execute = (oldExecInfo.execute & mask) | (execInfo.execute & ~mask); execInfo = oldExecInfo; // It may have returned a reference, but we just want the value... return jsvSkipNameAndUnLock(v); } JsVar *jspEvaluate(const char *str, bool stringIsStatic) { /* using a memory area is more efficient, but the interpreter * may use substrings from it for function code. This means that * if the string goes away, everything gets corrupted - hence * the option here. */ JsVar *evCode; if (stringIsStatic) evCode = jsvNewNativeString((char*)str, strlen(str)); else evCode = jsvNewFromString(str); if (!evCode) return 0; JsVar *v = 0; if (!jsvIsMemoryFull()) v = jspEvaluateVar(evCode, 0, 0); jsvUnLock(evCode); return v; } JsVar *jspExecuteFunction(JsVar *func, JsVar *thisArg, int argCount, JsVar **argPtr) { JsExecInfo oldExecInfo = execInfo; execInfo.scopeCount = 0; execInfo.execute = EXEC_YES; execInfo.thisVar = 0; JsVar *result = jspeFunctionCall(func, 0, thisArg, false, argCount, argPtr); // clean up assert(execInfo.scopeCount==0); // restore state oldExecInfo.execute = execInfo.execute; // JSP_RESTORE_EXECUTE has made this ok. execInfo = oldExecInfo; return result; } /// Evaluate a JavaScript module and return its exports JsVar *jspEvaluateModule(JsVar *moduleContents) { assert(jsvIsString(moduleContents) || jsvIsFunction(moduleContents)); if (jsvIsFunction(moduleContents)) { moduleContents = jsvObjectGetChild(moduleContents,JSPARSE_FUNCTION_CODE_NAME,0); if (!jsvIsString(moduleContents)) { jsvUnLock(moduleContents); return 0; } } else jsvLockAgain(moduleContents); JsVar *scope = jsvNewObject(); JsVar *scopeExports = jsvNewObject(); if (!scope || !scopeExports) { // out of mem jsvUnLock3(scope, scopeExports, moduleContents); return 0; } JsVar *exportsName = jsvAddNamedChild(scope, scopeExports, "exports"); jsvUnLock2(scopeExports, jsvAddNamedChild(scope, scope, "module")); JsExecFlags oldExecute = execInfo.execute; JsVar *oldThisVar = execInfo.thisVar; execInfo.thisVar = scopeExports; // set 'this' variable to exports jsvUnLock(jspEvaluateVar(moduleContents, scope, 0)); execInfo.thisVar = oldThisVar; execInfo.execute = oldExecute; // make sure we fully restore state after parsing a module jsvUnLock2(moduleContents, scope); return jsvSkipNameAndUnLock(exportsName); } /** Get the owner of the current prototype. We assume that it's * the first item in the array, because that's what we will * have added when we created it. It's safe to call this on * non-prototypes and non-objects. */ JsVar *jspGetPrototypeOwner(JsVar *proto) { if (jsvIsObject(proto) || jsvIsArray(proto)) { return jsvSkipNameAndUnLock(jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0)); } return 0; }
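
/* Editorial usage sketch (not part of the original file): how an embedding
 * application might drive the entry points above. Everything referenced
 * here - jspInit(), jspEvaluate(), jspKill(), jsvGetString(), jsvUnLock() -
 * is declared by this module or by Espruino's jsvar API; the buffer size
 * is an arbitrary assumption and the block is guarded out of compilation
 * as an illustration only. */
#if 0
static void example_run_snippet(void)
{
  char buf[64];   /* arbitrary result buffer size */
  jspInit();      /* sets up root + hidden root, see jspSoftInit() above */
  /* second argument 'true' promises the string is static, so the
   * interpreter may reference it in place instead of copying it
   * (see the comment in jspEvaluate() above) */
  JsVar *result = jspEvaluate("var a = 2; a * 21;", true);
  if (result) {
    jsvGetString(result, buf, sizeof(buf)); /* buf now holds "42" */
    jsvUnLock(result);
  }
  jspKill();
}
#endif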
./CrossVul/dataset_final_sorted/CWE-119/c/good_159_1
crossvul-cpp_data_good_2796_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * Chetan Loke : Implemented TPACKET_V3 block abstraction * layer. * Copyright (C) 2011, <lokec@ccs.neu.edu> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #include <linux/percpu.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif #include <linux/bpf.h> #include <net/compat.h> #include "internal.h" /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. 
On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely that it points to the ll header. PPP does this, which is wrong because it introduces asymmetry between the rx and tx paths. data -> data Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! data -> data Summary: if dev->hard_header==NULL we are unlikely to restore a sensible ll header. On transmit: ------------ dev->hard_header != NULL mac_header -> ll header data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data We should set nh.raw on output to the correct position; the packet classifier depends on it. */ /* Private packet socket structures. */ /* identical to struct packet_mreq except it has * a longer address field. */ struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[MAX_ADDR_LEN]; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring); #define V3_ALIGNMENT (8) #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) #define PGV_FROM_VMALLOC 1 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) struct packet_sock; static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status); static void packet_increment_head(struct packet_ring_buffer *buff); static int prb_curr_blk_in_use(struct tpacket_block_desc *); static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, struct packet_sock *); static void prb_retire_current_block(struct tpacket_kbdq_core *, struct packet_sock *, unsigned int status); static int prb_queue_frozen(struct tpacket_kbdq_core *); static void prb_open_block(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void prb_retire_rx_blk_timer_expired(unsigned long); static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); static void prb_init_blk_timer(struct packet_sock *, struct tpacket_kbdq_core *, void (*func) (unsigned long)); static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_clear_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_fill_vlan_info(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void packet_flush_mclist(struct sock *sk); static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb); struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { /* Trick: alias skb original length with * ll.sll_family and ll.protocol in order * to save room.
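 *
 * (Editorial note: this aliasing works because sll_family and
 * sll_protocol are the first two 16-bit members of struct sockaddr_ll,
 * so the 32-bit origlen overlays exactly those bytes; the receive path
 * can stash the original skb length there and rebuild the sockaddr
 * fields just before handing the address to recvmsg().)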
*/ unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; #define vio_le() virtio_legacy_is_little_endian() #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) #define GET_PBLOCK_DESC(x, bid) \ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) #define GET_NEXT_PRB_BLK_NUM(x) \ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ ((x)->kactive_blk_num+1) : 0) static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; skb = validate_xmit_skb_list(skb, dev); if (skb != orig_skb) goto drop; packet_pick_tx_queue(dev, skb); txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); if (!dev_xmit_complete(ret)) kfree_skb(skb); return ret; drop: atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return NET_XMIT_DROP; } static struct net_device *packet_cached_dev_get(struct packet_sock *po) { struct net_device *dev; rcu_read_lock(); dev = rcu_dereference(po->cached_dev); if (likely(dev)) dev_hold(dev); rcu_read_unlock(); return dev; } static void packet_cached_dev_assign(struct packet_sock *po, struct net_device *dev) { rcu_assign_pointer(po->cached_dev, dev); } static void packet_cached_dev_reset(struct packet_sock *po) { RCU_INIT_POINTER(po->cached_dev, NULL); } static bool packet_use_direct_xmit(const struct packet_sock *po) { return po->xmit == packet_direct_xmit; } static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index; if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb, NULL, __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { queue_index = __packet_pick_tx_queue(dev, skb); } skb_set_queue_mapping(skb, queue_index); } /* register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). */ static void register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); if (!po->running) { if (po->fanout) __fanout_link(sk, po); else dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } } /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock * held. If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. If the sync parameter is false, it is the * callers responsibility to take care of this. 
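 *
 * Typical caller sketch (editorial illustration; the bind path later in
 * this file follows this shape):
 *
 *	spin_lock(&po->bind_lock);
 *	unregister_prot_hook(sk, true);	(may drop/retake bind_lock to sync)
 *	...update po->num / po->prot_hook.dev...
 *	spin_unlock(&po->bind_lock);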
*/ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); po->running = 0; if (po->fanout) __fanout_unlink(sk, po); else __dev_remove_pack(&po->prot_hook); __sock_put(sk); if (sync) { spin_unlock(&po->bind_lock); synchronize_net(); spin_lock(&po->bind_lock); } } static void unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); if (po->running) __unregister_prot_hook(sk, sync); } static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); return virt_to_page(addr); } static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_status = status; flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: h.h2->tp_status = status; flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: h.h3->tp_status = status; flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } smp_wmb(); } static int __packet_get_status(struct packet_sock *po, void *frame) { union tpacket_uhdr h; smp_rmb(); h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); return h.h1->tp_status; case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); return h.h2->tp_status; case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); return h.h3->tp_status; default: WARN(1, "TPACKET version not supported.\n"); BUG(); return 0; } } static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, unsigned int flags) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (shhwtstamps && (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) return TP_STATUS_TS_RAW_HARDWARE; if (ktime_to_timespec_cond(skb->tstamp, ts)) return TP_STATUS_TS_SOFTWARE; return 0; } static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, struct sk_buff *skb) { union tpacket_uhdr h; struct timespec ts; __u32 ts_status; if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) return 0; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; break; case TPACKET_V2: h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; break; case TPACKET_V3: h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; break; default: WARN(1, "TPACKET version not supported.\n"); BUG(); } /* one flush is safe, as both fields always lie on the same cacheline */ flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); smp_wmb(); return ts_status; } static void *packet_lookup_frame(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int position, int status) { unsigned int pg_vec_pos, frame_offset; union tpacket_uhdr h; pg_vec_pos = position / rb->frames_per_block; frame_offset = position % rb->frames_per_block; h.raw = rb->pg_vec[pg_vec_pos].buffer + (frame_offset * rb->frame_size); if (status != __packet_get_status(po, h.raw)) return NULL; return h.raw; } static void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { return packet_lookup_frame(po, rb, rb->head, status); } static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { del_timer_sync(&pkc->retire_blk_timer); } static void prb_shutdown_retire_blk_timer(struct packet_sock *po, struct sk_buff_head *rb_queue) { 
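	/* (Editorial note: delete_blk_timer is set under the receive-queue
	 * lock so that prb_retire_rx_blk_timer_expired(), which takes the
	 * same lock, sees the flag and bails out instead of re-arming.) */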
struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); spin_lock_bh(&rb_queue->lock); pkc->delete_blk_timer = 1; spin_unlock_bh(&rb_queue->lock); prb_del_retire_blk_timer(pkc); } static void prb_init_blk_timer(struct packet_sock *po, struct tpacket_kbdq_core *pkc, void (*func) (unsigned long)) { init_timer(&pkc->retire_blk_timer); pkc->retire_blk_timer.data = (long)po; pkc->retire_blk_timer.function = func; pkc->retire_blk_timer.expires = jiffies; } static void prb_setup_retire_blk_timer(struct packet_sock *po) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); } static int prb_calc_retire_blk_tmo(struct packet_sock *po, int blk_size_in_bytes) { struct net_device *dev; unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; struct ethtool_link_ksettings ecmd; int err; rtnl_lock(); dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); if (unlikely(!dev)) { rtnl_unlock(); return DEFAULT_PRB_RETIRE_TOV; } err = __ethtool_get_link_ksettings(dev, &ecmd); rtnl_unlock(); if (!err) { /* * If the link speed is so slow you don't really * need to worry about perf anyways */ if (ecmd.base.speed < SPEED_1000 || ecmd.base.speed == SPEED_UNKNOWN) { return DEFAULT_PRB_RETIRE_TOV; } else { msec = 1; div = ecmd.base.speed / 1000; } } mbits = (blk_size_in_bytes * 8) / (1024 * 1024); if (div) mbits /= div; tmo = mbits * msec; if (div) return tmo+1; return tmo; } static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, union tpacket_req_u *req_u) { p1->feature_req_word = req_u->req3.tp_feature_req_word; } static void init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u) { struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd; memset(p1, 0x0, sizeof(*p1)); p1->knxt_seq_num = 1; p1->pkbdq = pg_vec; pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; p1->pkblk_start = pg_vec[0].buffer; p1->kblk_size = req_u->req3.tp_block_size; p1->knum_blocks = req_u->req3.tp_block_nr; p1->hdrlen = po->tp_hdrlen; p1->version = po->tp_version; p1->last_kactive_blk_num = 0; po->stats.stats3.tp_freeze_q_cnt = 0; if (req_u->req3.tp_retire_blk_tov) p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; else p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); prb_setup_retire_blk_timer(po); prb_open_block(p1, pbd); } /* Do NOT update the last_blk_num first. * Assumes sk_buff_head lock is held. */ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) { mod_timer(&pkc->retire_blk_timer, jiffies + pkc->tov_in_jiffies); pkc->last_kactive_blk_num = pkc->kactive_blk_num; } /* * Timer logic: * 1) We refresh the timer only when we open a block. * By doing this we don't waste cycles refreshing the timer * on packet-by-packet basis. * * With a 1MB block-size, on a 1Gbps line, it will take * i) ~8 ms to fill a block + ii) memcpy etc. * In this cut we are not accounting for the memcpy time. * * So, if the user sets the 'tmo' to 10ms then the timer * will never fire while the block is still getting filled * (which is what we want). However, the user could choose * to close a block early and that's fine. * * But when the timer does fire, we check whether or not to refresh it. 
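 *
 * (Editorial worked example of prb_calc_retire_blk_tmo() above, with
 * assumed numbers: for a 1MB block on a 1Gbps link,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1000 / 1000 = 1 and
 * msec = 1, so tmo = 8, returned as 9ms after the +1 rounding - which
 * matches the ~8ms fill estimate above.)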
* Since the tmo granularity is in msecs, it is not too expensive * to refresh the timer, lets say every '8' msecs. * Either the user can set the 'tmo' or we can derive it based on * a) line-speed and b) block-size. * prb_calc_retire_blk_tmo() calculates the tmo. * */ static void prb_retire_rx_blk_timer_expired(unsigned long data) { struct packet_sock *po = (struct packet_sock *)data; struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; spin_lock(&po->sk.sk_receive_queue.lock); frozen = prb_queue_frozen(pkc); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); if (unlikely(pkc->delete_blk_timer)) goto out; /* We only need to plug the race when the block is partially filled. * tpacket_rcv: * lock(); increment BLOCK_NUM_PKTS; unlock() * copy_bits() is in progress ... * timer fires on other cpu: * we can't retire the current block because copy_bits * is in progress. * */ if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { if (!BLOCK_NUM_PKTS(pbd)) { /* An empty block. Just refresh the timer. */ goto refresh_timer; } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; else goto out; } else { /* Case 1. Queue was frozen because user-space was * lagging behind. */ if (prb_curr_blk_in_use(pbd)) { /* * Ok, user-space is still behind. * So just refresh the timer. */ goto refresh_timer; } else { /* Case 2. queue was frozen,user-space caught up, * now the link went idle && the timer fired. * We don't have a block to close.So we open this * block and restart the timer. * opening a block thaws the queue,restarts timer * Thawing/timer-refresh is a side effect. */ prb_open_block(pkc, pbd); goto out; } } } refresh_timer: _prb_refresh_rx_retire_blk_timer(pkc); out: spin_unlock(&po->sk.sk_receive_queue.lock); } static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 u8 *start, *end; start = (u8 *)pbd1; /* Skip the block header(we know header WILL fit in 4K) */ start += PAGE_SIZE; end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); for (; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif /* Now update the block status. */ BLOCK_STATUS(pbd1) = status; /* Flush the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 start = (u8 *)pbd1; flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif } /* * Side effect: * * 1) flush the block * 2) Increment active_blk_num * * Note:We DONT refresh the timer on purpose. * Because almost always the next block will be opened. */ static void prb_close_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, struct packet_sock *po, unsigned int stat) { __u32 status = TP_STATUS_USER | stat; struct tpacket3_hdr *last_pkt; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; struct sock *sk = &po->sk; if (po->stats.stats3.tp_drops) status |= TP_STATUS_LOSING; last_pkt = (struct tpacket3_hdr *)pkc1->prev; last_pkt->tp_next_offset = 0; /* Get the ts of the last pkt */ if (BLOCK_NUM_PKTS(pbd1)) { h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { /* Ok, we tmo'd - so get the current time. * * It shouldn't really happen as we don't close empty * blocks. 
See prb_retire_rx_blk_timer_expired(). */ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; h1->ts_last_pkt.ts_nsec = ts.tv_nsec; } smp_wmb(); /* Flush the block */ prb_flush_block(pkc1, pbd1, status); sk->sk_data_ready(sk); pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } /* * Side effect of opening a block: * * 1) prb_queue is thawed. * 2) retire_blk_timer is refreshed. * */ static void prb_open_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1) { struct timespec ts; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; smp_rmb(); /* We could have just memset this but we will lose the * flexibility of making the priv area sticky */ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; BLOCK_NUM_PKTS(pbd1) = 0; BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); getnstimeofday(&ts); h1->ts_first_pkt.ts_sec = ts.tv_sec; h1->ts_first_pkt.ts_nsec = ts.tv_nsec; pkc1->pkblk_start = (char *)pbd1; pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; pbd1->version = pkc1->version; pkc1->prev = pkc1->nxt_offset; pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; prb_thaw_queue(pkc1); _prb_refresh_rx_retire_blk_timer(pkc1); smp_wmb(); } /* * Queue freeze logic: * 1) Assume tp_block_nr = 8 blocks. * 2) At time 't0', user opens Rx ring. * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 * 4) user-space is either sleeping or processing block '0'. * 5) tpacket_rcv is currently filling block '7', since there is no space left, * it will close block-7,loop around and try to fill block '0'. * call-flow: * __packet_lookup_frame_in_block * prb_retire_current_block() * prb_dispatch_next_block() * |->(BLOCK_STATUS == USER) evaluates to true * 5.1) Since block-0 is currently in-use, we just freeze the queue. * 6) Now there are two cases: * 6.1) Link goes idle right after the queue is frozen. * But remember, the last open_block() refreshed the timer. * When this timer expires,it will refresh itself so that we can * re-open block-0 in near future. * 6.2) Link is busy and keeps on receiving packets. This is a simple * case and __packet_lookup_frame_in_block will check if block-0 * is free and can now be re-used. */ static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { pkc->reset_pending_on_curr_blk = 1; po->stats.stats3.tp_freeze_q_cnt++; } #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) /* * If the next block is free then we will dispatch it * and return a good offset. * Else, we will freeze the queue. * So, caller must check the return value. */ static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { struct tpacket_block_desc *pbd; smp_rmb(); /* 1. Get current block num */ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* 2. If this block is currently in_use then freeze the queue */ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { prb_freeze_queue(pkc, po); return NULL; } /* * 3. * open this block and return the offset where the first packet * needs to get stored. 
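 *
 * Caller sketch (editorial; this mirrors __packet_lookup_frame_in_block()
 * further down):
 *
 *	curr = (char *)prb_dispatch_next_block(pkc, po);
 *	if (!curr)
 *		return NULL;	- queue just froze, this packet is dropped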
*/ prb_open_block(pkc, pbd); return (void *)pkc->nxt_offset; } static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po, unsigned int status) { struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* retire/close the current block */ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { /* * Plug the case where copy_bits() is in progress on * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't * have space to copy the pkt in the current block and * called prb_retire_current_block() * * We don't need to worry about the TMO case because * the timer-handler already handled this case. */ if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } prb_close_block(pkc, pbd, po, status); return; } } static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) { return TP_STATUS_USER & BLOCK_STATUS(pbd); } static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) { return pkc->reset_pending_on_curr_blk; } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); } static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); } static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_rxhash = 0; } static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { if (skb_vlan_tag_present(pkc->skb)) { ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { ppd->hv1.tp_vlan_tci = 0; ppd->hv1.tp_vlan_tpid = 0; ppd->tp_status = TP_STATUS_AVAILABLE; } } static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { ppd->hv1.tp_padding = 0; prb_fill_vlan_info(pkc, ppd); if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) prb_fill_rxhash(pkc, ppd); else prb_clear_rxhash(pkc, ppd); } static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) { struct tpacket3_hdr *ppd; ppd = (struct tpacket3_hdr *)curr; ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); pkc->prev = curr; pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_NUM_PKTS(pbd) += 1; atomic_inc(&pkc->blk_fill_in_prog); prb_run_all_ft_ops(pkc, ppd); } /* Assumes caller has the sk->rx_queue.lock */ static void *__packet_lookup_frame_in_block(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len ) { struct tpacket_kbdq_core *pkc; struct tpacket_block_desc *pbd; char *curr, *end; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* Queue is frozen when user space is lagging behind */ if (prb_queue_frozen(pkc)) { /* * Check if that last block which caused the queue to freeze, * is still in_use by user-space. */ if (prb_curr_blk_in_use(pbd)) { /* Can't record this packet */ return NULL; } else { /* * Ok, the block was released by user-space. * Now let's open that block. * opening a block also thaws the queue. * Thawing is a side effect. 
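 *
 * (Editorial note: a block's status word thus cycles TP_STATUS_KERNEL ->
 * TP_STATUS_USER -> TP_STATUS_KERNEL, the last transition written by
 * user space when it releases the block; reset_pending_on_curr_blk is
 * the "frozen" flag, set by prb_freeze_queue() and cleared again via
 * prb_thaw_queue() from prb_open_block().)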
*/ prb_open_block(pkc, pbd); } } smp_mb(); curr = pkc->nxt_offset; pkc->skb = skb; end = (char *)pbd + pkc->kblk_size; /* first try the current block */ if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* Ok, close the current block */ prb_retire_current_block(pkc, po, 0); /* Now, try to dispatch the next block */ curr = (char *)prb_dispatch_next_block(pkc, po); if (curr) { pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); prb_fill_curr_block(curr, pkc, pbd, len); return (void *)curr; } /* * No free blocks are available. User-space hasn't caught up yet. * Queue was just frozen and now this packet will get dropped. */ return NULL; } static void *packet_current_rx_frame(struct packet_sock *po, struct sk_buff *skb, int status, unsigned int len) { char *curr = NULL; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: curr = packet_lookup_frame(po, &po->rx_ring, po->rx_ring.head, status); return curr; case TPACKET_V3: return __packet_lookup_frame_in_block(po, skb, status, len); default: WARN(1, "TPACKET version not supported\n"); BUG(); return NULL; } } static void *prb_lookup_block(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int idx, int status) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); if (status != BLOCK_STATUS(pbd)) return NULL; return pbd; } static int prb_previous_blk_num(struct packet_ring_buffer *rb) { unsigned int prev; if (rb->prb_bdqc.kactive_blk_num) prev = rb->prb_bdqc.kactive_blk_num-1; else prev = rb->prb_bdqc.knum_blocks-1; return prev; } /* Assumes caller has held the rx_queue.lock */ static void *__prb_previous_block(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = prb_previous_blk_num(rb); return prb_lookup_block(po, rb, previous, status); } static void *packet_previous_rx_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { if (po->tp_version <= TPACKET_V2) return packet_previous_frame(po, rb, status); return __prb_previous_block(po, rb, status); } static void packet_increment_rx_head(struct packet_sock *po, struct packet_ring_buffer *rb) { switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: return packet_increment_head(rb); case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); return; } } static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; return packet_lookup_frame(po, rb, previous, status); } static void packet_increment_head(struct packet_ring_buffer *buff) { buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; } static void packet_inc_pending(struct packet_ring_buffer *rb) { this_cpu_inc(*rb->pending_refcnt); } static void packet_dec_pending(struct packet_ring_buffer *rb) { this_cpu_dec(*rb->pending_refcnt); } static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) { unsigned int refcnt = 0; int cpu; /* We don't use pending refcount in rx_ring.
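 * It only tracks TX frames that were handed to the driver
 * (TP_STATUS_SENDING) and whose destructor has not run yet:
 * packet_inc_pending() above is called from tpacket_snd() and
 * packet_dec_pending() from tpacket_destruct_skb(), and the sum
 * computed here tells a blocking sender whether it must keep
 * waiting.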
*/ if (rb->pending_refcnt == NULL) return 0; for_each_possible_cpu(cpu) refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); return refcnt; } static int packet_alloc_pending(struct packet_sock *po) { po->rx_ring.pending_refcnt = NULL; po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); if (unlikely(po->tx_ring.pending_refcnt == NULL)) return -ENOBUFS; return 0; } static void packet_free_pending(struct packet_sock *po) { free_percpu(po->tx_ring.pending_refcnt); } #define ROOM_POW_OFF 2 #define ROOM_NONE 0x0 #define ROOM_LOW 0x1 #define ROOM_NORMAL 0x2 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.frame_max + 1; idx = po->rx_ring.head; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) { int idx, len; len = po->rx_ring.prb_bdqc.knum_blocks; idx = po->rx_ring.prb_bdqc.kactive_blk_num; if (pow_off) idx += len >> pow_off; if (idx >= len) idx -= len; return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); } static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { struct sock *sk = &po->sk; int ret = ROOM_NONE; if (po->prot_hook.func != tpacket_rcv) { int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) - (skb ? skb->truesize : 0); if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) return ROOM_NORMAL; else if (avail > 0) return ROOM_LOW; else return ROOM_NONE; } if (po->tp_version == TPACKET_V3) { if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_v3_has_room(po, 0)) ret = ROOM_LOW; } else { if (__tpacket_has_room(po, ROOM_POW_OFF)) ret = ROOM_NORMAL; else if (__tpacket_has_room(po, 0)) ret = ROOM_LOW; } return ret; } static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) { int ret; bool has_room; spin_lock_bh(&po->sk.sk_receive_queue.lock); ret = __packet_rcv_has_room(po, skb); has_room = ret == ROOM_NORMAL; if (po->pressure == has_room) po->pressure = !has_room; spin_unlock_bh(&po->sk.sk_receive_queue.lock); return ret; } static void packet_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_error_queue); WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(refcount_read(&sk->sk_wmem_alloc)); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive packet socket: %p\n", sk); return; } sk_refcnt_debug_dec(sk); } static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) { u32 rxhash; int i, count = 0; rxhash = skb_get_hash(skb); for (i = 0; i < ROLLOVER_HLEN; i++) if (po->rollover->history[i] == rxhash) count++; po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; return count > (ROLLOVER_HLEN >> 1); } static unsigned int fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return reciprocal_scale(__skb_get_hash_symmetric(skb), num); } static unsigned int fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { unsigned int val = atomic_inc_return(&f->rr_cur); return val % num; } static unsigned int fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return smp_processor_id() % num; } static unsigned int fanout_demux_rnd(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return prandom_u32_max(num); } static unsigned int fanout_demux_rollover(struct packet_fanout *f, struct sk_buff *skb, unsigned int idx, bool try_self, unsigned int num) { struct packet_sock *po, 
*po_next, *po_skip = NULL; unsigned int i, j, room = ROOM_NONE; po = pkt_sk(f->arr[idx]); if (try_self) { room = packet_rcv_has_room(po, skb); if (room == ROOM_NORMAL || (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) return idx; po_skip = po; } i = j = min_t(int, po->rollover->sock, num - 1); do { po_next = pkt_sk(f->arr[i]); if (po_next != po_skip && !po_next->pressure && packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { if (i != j) po->rollover->sock = i; atomic_long_inc(&po->rollover->num); if (room == ROOM_LOW) atomic_long_inc(&po->rollover->num_huge); return i; } if (++i == num) i = 0; } while (i != j); atomic_long_inc(&po->rollover->num_failed); return idx; } static unsigned int fanout_demux_qm(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { return skb_get_queue_mapping(skb) % num; } static unsigned int fanout_demux_bpf(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) { struct bpf_prog *prog; unsigned int ret = 0; rcu_read_lock(); prog = rcu_dereference(f->bpf_prog); if (prog) ret = bpf_prog_run_clear_cb(prog, skb) % num; rcu_read_unlock(); return ret; } static bool fanout_has_flag(struct packet_fanout *f, u16 flag) { return f->flags & (flag >> 8); } static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct packet_fanout *f = pt->af_packet_priv; unsigned int num = READ_ONCE(f->num_members); struct net *net = read_pnet(&f->net); struct packet_sock *po; unsigned int idx; if (!net_eq(dev_net(dev), net) || !num) { kfree_skb(skb); return 0; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); if (!skb) return 0; } switch (f->type) { case PACKET_FANOUT_HASH: default: idx = fanout_demux_hash(f, skb, num); break; case PACKET_FANOUT_LB: idx = fanout_demux_lb(f, skb, num); break; case PACKET_FANOUT_CPU: idx = fanout_demux_cpu(f, skb, num); break; case PACKET_FANOUT_RND: idx = fanout_demux_rnd(f, skb, num); break; case PACKET_FANOUT_QM: idx = fanout_demux_qm(f, skb, num); break; case PACKET_FANOUT_ROLLOVER: idx = fanout_demux_rollover(f, skb, 0, false, num); break; case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: idx = fanout_demux_bpf(f, skb, num); break; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) idx = fanout_demux_rollover(f, skb, idx, true, num); po = pkt_sk(f->arr[idx]); return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); } DEFINE_MUTEX(fanout_mutex); EXPORT_SYMBOL_GPL(fanout_mutex); static LIST_HEAD(fanout_list); static u16 fanout_next_id; static void __fanout_link(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; spin_lock(&f->lock); f->arr[f->num_members] = sk; smp_wmb(); f->num_members++; if (f->num_members == 1) dev_add_pack(&f->prot_hook); spin_unlock(&f->lock); } static void __fanout_unlink(struct sock *sk, struct packet_sock *po) { struct packet_fanout *f = po->fanout; int i; spin_lock(&f->lock); for (i = 0; i < f->num_members; i++) { if (f->arr[i] == sk) break; } BUG_ON(i >= f->num_members); f->arr[i] = f->arr[f->num_members - 1]; f->num_members--; if (f->num_members == 0) __dev_remove_pack(&f->prot_hook); spin_unlock(&f->lock); } static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) { if (sk->sk_family != PF_PACKET) return false; return ptype->af_packet_priv == pkt_sk(sk)->fanout; } static void fanout_init_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_LB: atomic_set(&f->rr_cur, 0); break; case 
PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: RCU_INIT_POINTER(f->bpf_prog, NULL); break; } } static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) { struct bpf_prog *old; spin_lock(&f->lock); old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); rcu_assign_pointer(f->bpf_prog, new); spin_unlock(&f->lock); if (old) { synchronize_net(); bpf_prog_destroy(old); } } static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; struct sock_fprog fprog; int ret; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fprog)) return -EINVAL; if (copy_from_user(&fprog, data, len)) return -EFAULT; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) return ret; __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; u32 fd; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fd)) return -EINVAL; if (copy_from_user(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(new)) return PTR_ERR(new); __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data(struct packet_sock *po, char __user *data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: return -EINVAL; }; } static void fanout_release_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: __fanout_set_data_bpf(f, NULL); }; } static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) { struct packet_fanout *f; list_for_each_entry(f, &fanout_list, list) { if (f->id == candidate_id && read_pnet(&f->net) == sock_net(sk)) { return false; } } return true; } static bool fanout_find_new_id(struct sock *sk, u16 *new_id) { u16 id = fanout_next_id; do { if (__fanout_id_is_free(sk, id)) { *new_id = id; fanout_next_id = id + 1; return true; } id++; } while (id != fanout_next_id); return false; } static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_rollover *rollover = NULL; struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f, *match; u8 type = type_flags & 0xff; u8 flags = type_flags >> 8; int err; switch (type) { case PACKET_FANOUT_ROLLOVER: if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) return -EINVAL; case PACKET_FANOUT_HASH: case PACKET_FANOUT_LB: case PACKET_FANOUT_CPU: case PACKET_FANOUT_RND: case PACKET_FANOUT_QM: case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: break; default: return -EINVAL; } mutex_lock(&fanout_mutex); err = -EINVAL; if (!po->running) goto out; err = -EALREADY; if (po->fanout) goto out; if (type == PACKET_FANOUT_ROLLOVER || (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { err = -ENOMEM; rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); if (!rollover) goto out; atomic_long_set(&rollover->num, 0); atomic_long_set(&rollover->num_huge, 0); atomic_long_set(&rollover->num_failed, 0); po->rollover = rollover; } if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { if (id != 0) { err = -EINVAL; goto out; } if (!fanout_find_new_id(sk, &id)) { err = -ENOMEM; goto out; } /* ephemeral flag for the first socket in the group: drop it */ flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); } match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && read_pnet(&f->net) == 
sock_net(sk)) { match = f; break; } } err = -EINVAL; if (match && match->flags != flags) goto out; if (!match) { err = -ENOMEM; match = kzalloc(sizeof(*match), GFP_KERNEL); if (!match) goto out; write_pnet(&match->net, sock_net(sk)); match->id = id; match->type = type; match->flags = flags; INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); refcount_set(&match->sk_ref, 0); fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; list_add(&match->list, &fanout_list); } err = -EINVAL; if (match->type == type && match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); __fanout_link(sk, po); err = 0; } } out: if (err && rollover) { kfree(rollover); po->rollover = NULL; } mutex_unlock(&fanout_mutex); return err; } /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. * It is the responsibility of the caller to call fanout_release_data() and * free the returned packet_fanout (after synchronize_net()) */ static struct packet_fanout *fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; mutex_lock(&fanout_mutex); f = po->fanout; if (f) { po->fanout = NULL; if (refcount_dec_and_test(&f->sk_ref)) list_del(&f->list); else f = NULL; if (po->rollover) kfree_rcu(po->rollover, rcu); } mutex_unlock(&fanout_mutex); return f; } static bool packet_extra_vlan_len_allowed(const struct net_device *dev, struct sk_buff *skb) { /* Earlier code assumed this would be a VLAN pkt, double-check * this now that we have the actual packet in hand. We can only * do this check on Ethernet devices. */ if (unlikely(dev->type != ARPHRD_ETHER)) return false; skb_reset_mac_header(skb); return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); } static const struct proto_ops packet_ops; static const struct proto_ops packet_ops_spkt; static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_pkt *spkt; /* * When we registered the protocol we saved the socket in the data * field for just this event. */ sk = pt->af_packet_priv; /* * Yank back the headers [hope the device set this * right or kerboom...] * * Incoming packets have ll header pulled, * push it back. * * For outgoing ones skb->data == skb_mac_header(skb) * so that this procedure is noop. */ if (skb->pkt_type == PACKET_LOOPBACK) goto out; if (!net_eq(dev_net(dev), sock_net(sk))) goto out; skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) goto oom; /* drop any routing info */ skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spkt = &PACKET_SKB_CB(skb)->sa.pkt; skb_push(skb, skb->data - skb_mac_header(skb)); /* * The SOCK_PACKET socket receives _all_ frames. */ spkt->spkt_family = dev->type; strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* * Charge the memory to the socket. This is done specifically * to prevent sockets using all the memory up. 
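 * sock_queue_rcv_skb() fails once sk_rmem_alloc would exceed
 * sk_rcvbuf, in which case we fall through and free the skb below.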
*/ if (sock_queue_rcv_skb(sk, skb) == 0) return 0; out: kfree_skb(skb); oom: return 0; } /* * Output a raw packet to a device layer. This bypasses all the other * protocol layers and you must therefore supply it with a complete frame */ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); struct sk_buff *skb = NULL; struct net_device *dev; struct sockcm_cookie sockc; __be16 proto = 0; int err; int extra_len = 0; /* * Get and verify the address. */ if (saddr) { if (msg->msg_namelen < sizeof(struct sockaddr)) return -EINVAL; if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) proto = saddr->spkt_protocol; } else return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ /* * Find the device first to size check it */ saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; retry: rcu_read_lock(); dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); err = -ENODEV; if (dev == NULL) goto out_unlock; err = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_unlock; /* * You may not queue a frame bigger than the mtu. This is the lowest level * raw protocol and you must do your own fragmentation at this level. */ if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) goto out_unlock; if (!skb) { size_t reserved = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; rcu_read_unlock(); skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; /* FIXME: Save some space for broken drivers that write a hard * header at transmission time by themselves. PPP is the notable * one here. This should really be fixed at the driver level. 
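 * (Until that happens, the LL_RESERVED_SPACE(dev) headroom plus
 * dev->needed_tailroom reserved above is normally enough for a
 * well-behaved device.)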
*/ skb_reserve(skb, reserved); skb_reset_network_header(skb); /* Try to align data part correctly */ if (hhlen) { skb->data -= hhlen; skb->tail -= hhlen; if (len < hhlen) skb_reset_network_header(skb); } err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err) goto out_free; goto retry; } if (!dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_unlock; } if (len > (dev->mtu + dev->hard_header_len + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_unlock; } sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (unlikely(extra_len == 4)) skb->no_fcs = 1; skb_probe_transport_header(skb, 0); dev_queue_xmit(skb); rcu_read_unlock(); return len; out_unlock: rcu_read_unlock(); out_free: kfree_skb(skb); return err; } static unsigned int run_filter(struct sk_buff *skb, const struct sock *sk, unsigned int res) { struct sk_filter *filter; rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter != NULL) res = bpf_prog_run_clear_cb(filter->prog, skb); rcu_read_unlock(); return res; } static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, size_t *len) { struct virtio_net_hdr vnet_hdr; if (*len < sizeof(vnet_hdr)) return -EINVAL; *len -= sizeof(vnet_hdr); if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) return -EINVAL; return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); } /* * This function performs lazy skb cloning in the hope that most packets * are discarded by BPF. * * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len * and skb->cb are mangled. It works because (and until) packets * falling here are owned by the current CPU. Output packets are cloned * by dev_queue_xmit_nit(), input packets are processed by net_bh * sequentially, so that if we return the skb to its original state on exit, * we will not harm anyone. */ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct sockaddr_ll *sll; struct packet_sock *po; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; bool is_drop_n_account = false; if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; skb->dev = dev; if (dev->header_ops) { /* The device has an explicit notion of ll header, * exported to higher levels. * * Otherwise, the device hides details of its frame * structure, so that corresponding packet head is * never delivered to user.
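 *
 * In other words: on a device with header_ops a SOCK_RAW socket
 * gets the link-level header pushed back so it sees the full
 * frame, while a SOCK_DGRAM socket keeps skb->data at the network
 * header and receives the address via sockaddr_ll instead.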
*/ if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (snaplen > res) snaplen = res; if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) goto drop_n_acct; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (nskb == NULL) goto drop_n_acct; if (skb_head != skb->data) { skb->data = skb_head; skb->len = skb_len; } consume_skb(skb); skb = nskb; } sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); sll = &PACKET_SKB_CB(skb)->sa.ll; sll->sll_hatype = dev->type; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; sll->sll_halen = dev_parse_header(skb, sll->sll_addr); /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). * Use their space for storing the original skb length. */ PACKET_SKB_CB(skb)->sa.origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; skb_set_owner_r(skb, sk); skb->dev = NULL; skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; sock_skb_set_dropcount(sk, skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); return 0; drop_n_acct: is_drop_n_account = true; spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_drops++; atomic_inc(&sk->sk_drops); spin_unlock(&sk->sk_receive_queue.lock); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; } static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
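 * (TPACKET_ALIGN() rounds up to a multiple of TPACKET_ALIGNMENT,
 * i.e. 16 bytes, which is where the 32- and 48-byte constants in
 * the BUILD_BUG_ON()s below come from.)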
*/ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; if (po->has_vnet_hdr) { netoff += sizeof(struct virtio_net_hdr); do_vnet = true; } macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { snaplen = 0; do_vnet = false; } } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { u32 nval; nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", snaplen, nval, macoff); snaplen = nval; if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; do_vnet = false; } } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_rx_frame(po, skb, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* * LOSING will be reported till you read the stats, * because it's COR - Clear On Read. * Anyways, moving it for V1/V2 only as V3 doesn't need this * at packet level. 
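 *
 * User-space reads, and thereby clears, the drop counter with
 * getsockopt(PACKET_STATISTICS), e.g. (illustrative sketch):
 *
 *	struct tpacket_stats_v3 st;
 *	socklen_t slen = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &slen);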
*/ if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } } skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) getnstimeofday(&ts); status |= ts_status; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (skb_vlan_tag_present(skb)) { h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; } memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); hdrlen = sizeof(*h.h2); break; case TPACKET_V3: /* tp_nxt_offset,vlan are already populated above. * So DONT clear those fields here */ h.h3->tp_status |= status; h.h3->tp_len = skb->len; h.h3->tp_snaplen = snaplen; h.h3->tp_mac = macoff; h.h3->tp_net = netoff; h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); hdrlen = sizeof(*h.h3); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 if (po->tp_version <= TPACKET_V2) { u8 *start, *end; end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } smp_wmb(); #endif if (po->tp_version <= TPACKET_V2) { __packet_set_status(po, h.raw, status); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); } drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; drop_n_account: is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); kfree_skb(copy_skb); goto drop_n_restore; } static void tpacket_destruct_skb(struct sk_buff *skb) { struct packet_sock *po = pkt_sk(skb->sk); if (likely(po->tx_ring.pg_vec)) { void *ph; __u32 ts; ph = skb_shinfo(skb)->destructor_arg; packet_dec_pending(&po->tx_ring); ts = __packet_set_timestamp(po, ph, skb); __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); } sock_wfree(skb); } static void tpacket_set_protocol(const struct net_device *dev, struct sk_buff *skb) { if (dev->type == ARPHRD_ETHER) { skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; } } static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) { if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 
__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) return -EINVAL; return 0; } static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, struct virtio_net_hdr *vnet_hdr) { if (*len < sizeof(*vnet_hdr)) return -EINVAL; *len -= sizeof(*vnet_hdr); if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) return -EFAULT; return __packet_snd_vnet_parse(vnet_hdr, *len); } static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, void *data, int tp_len, __be16 proto, unsigned char *addr, int hlen, int copylen, const struct sockcm_cookie *sockc) { union tpacket_uhdr ph; int to_write, offset, len, nr_frags, len_max; struct socket *sock = po->sk.sk_socket; struct page *page; int err; ph.raw = frame; skb->protocol = proto; skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; skb_reserve(skb, hlen); skb_reset_network_header(skb); to_write = tp_len; if (sock->type == SOCK_DGRAM) { err = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, tp_len); if (unlikely(err < 0)) return -EINVAL; } else if (copylen) { int hdrlen = min_t(int, copylen, tp_len); skb_push(skb, dev->hard_header_len); skb_put(skb, copylen - dev->hard_header_len); err = skb_store_bits(skb, 0, data, hdrlen); if (unlikely(err)) return err; if (!dev_validate_header(dev, skb->data, hdrlen)) return -EINVAL; if (!skb->protocol) tpacket_set_protocol(dev, skb); data += hdrlen; to_write -= hdrlen; } offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); skb->data_len = to_write; skb->len += to_write; skb->truesize += to_write; refcount_add(to_write, &po->sk.sk_wmem_alloc); while (likely(to_write)) { nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { pr_err("Packet exceed the number of skb frags(%lu)\n", MAX_SKB_FRAGS); return -EFAULT; } page = pgv_to_page(data); data += len; flush_dcache_page(page); get_page(page); skb_fill_page_desc(skb, nr_frags, page, offset, len); to_write -= len; offset = 0; len_max = PAGE_SIZE; len = ((to_write > len_max) ? 
len_max : to_write); } skb_probe_transport_header(skb, 0); return tp_len; } static int tpacket_parse_header(struct packet_sock *po, void *frame, int size_max, void **data) { union tpacket_uhdr ph; int tp_len, off; ph.raw = frame; switch (po->tp_version) { case TPACKET_V3: if (ph.h3->tp_next_offset != 0) { pr_warn_once("variable sized slot not supported"); return -EINVAL; } tp_len = ph.h3->tp_len; break; case TPACKET_V2: tp_len = ph.h2->tp_len; break; default: tp_len = ph.h1->tp_len; break; } if (unlikely(tp_len > size_max)) { pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); return -EMSGSIZE; } if (unlikely(po->tp_tx_has_off)) { int off_min, off_max; off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); off_max = po->tx_ring.frame_size - tp_len; if (po->sk.sk_type == SOCK_DGRAM) { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_net; break; case TPACKET_V2: off = ph.h2->tp_net; break; default: off = ph.h1->tp_net; break; } } else { switch (po->tp_version) { case TPACKET_V3: off = ph.h3->tp_mac; break; case TPACKET_V2: off = ph.h2->tp_mac; break; default: off = ph.h1->tp_mac; break; } } if (unlikely((off < off_min) || (off_max < off))) return -EINVAL; } else { off = po->tp_hdrlen - sizeof(struct sockaddr_ll); } *data = frame + off; return tp_len; } static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); int tp_len, size_max; unsigned char *addr; void *data; int len_sum = 0; int status = TP_STATUS_AVAILABLE; int hlen, tlen, copylen = 0; mutex_lock(&po->pg_vec_lock); if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) goto out_put; } if (po->sk.sk_socket->type == SOCK_RAW) reserve = dev->hard_header_len; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { if (need_wait && need_resched()) schedule(); continue; } skb = NULL; tp_len = tpacket_parse_header(po, ph, size_max, &data); if (tp_len < 0) goto tpacket_error; status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (po->has_vnet_hdr) { vnet_hdr = data; data += sizeof(*vnet_hdr); tp_len -= sizeof(*vnet_hdr); if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len); } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll) + (copylen - dev->hard_header_len), !need_wait, &err); if (unlikely(skb == NULL)) { /* we 
assume the socket was initially writeable ... */ if (likely(len_sum > 0)) err = len_sum; goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; if (unlikely(tp_len < 0)) { tpacket_error: if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; } skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || /* Note: packet_read_pending() might be slow if we have * to call it as it's per_cpu variable, but in fast-path * we already short-circuit the loop with the first * condition, and luckily don't have to go that path * anyway. */ (need_wait && packet_read_pending(&po->tx_ring)))); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, 0); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int err, reserve = 0; struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); int hlen, tlen, linear; int extra_len = 0; /* * Get and verify the address. 
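 * A socket already bound to a device and protocol may send with
 * saddr == NULL; otherwise the caller supplies a sockaddr_ll,
 * roughly like this (illustrative user-space sketch only):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = ifindex,
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, frame, len, 0, (struct sockaddr *)&sll, sizeof(sll));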
*/ if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out_unlock; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); if (err) goto out_unlock; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) goto out_unlock; err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); linear = max(linear, min_t(int, len, dev->hard_header_len)); skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; if (sock->type == SOCK_RAW && !dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_free; } sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_free; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; if (po->has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) goto out_free; len += sizeof(vnet_hdr); } skb_probe_transport_header(skb, reserve); if (unlikely(extra_len == 4)) skb->no_fcs = 1; err = po->xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. We immediately go * to 'closed' state and remove our protocol entry in the device list. 
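 *
 * Teardown order matters: the prot hook is unregistered under
 * bind_lock first, then the rings are freed, and a fanout group
 * (if any) is freed only after synchronize_net() so that no CPU
 * can still be inside packet_rcv_fanout() on it.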
*/ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; struct packet_fanout *f; struct net *net; union tpacket_req_u req_u; if (!sk) return 0; net = sock_net(sk); po = pkt_sk(sk); mutex_lock(&net->packet.sklist_lock); sk_del_node_init_rcu(sk); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); preempt_enable(); spin_lock(&po->bind_lock); unregister_prot_hook(sk, false); packet_cached_dev_reset(po); if (po->prot_hook.dev) { dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); packet_flush_mclist(sk); if (po->rx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); } if (po->tx_ring.pg_vec) { memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); } f = fanout_release(sk); synchronize_net(); if (f) { fanout_release_data(f); kfree(f); } /* * Now the socket is dead. No more input will appear. */ sock_orphan(sk); sock->sk = NULL; /* Purge queues */ skb_queue_purge(&sk->sk_receive_queue); packet_free_pending(po); sk_refcnt_debug_release(sk); sock_put(sk); return 0; } /* * Attach a packet hook. */ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; if (po->fanout) return -EINVAL; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[sizeof(uaddr->sa_data) + 1]; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; /* uaddr->sa_data comes from the userspace, it's not guaranteed to be * zero-terminated. 
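 * That is why it is copied into a local buffer one byte larger
 * than sa_data and explicitly NUL-terminated before being used
 * as a device name.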
*/ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); name[sizeof(uaddr->sa_data)] = 0; return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol ? : pkt_sk(sk)->num); } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet socket. */ static int packet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && sock->type != SOCK_PACKET) return -ESOCKTNOSUPPORT; sock->state = SS_UNCONNECTED; err = -ENOBUFS; sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); if (sk == NULL) goto out; sock->ops = &packet_ops; if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; sock_init_data(sock, sk); po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; err = packet_alloc_pending(po); if (err) goto out2; packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); /* * Attach a protocol block */ spin_lock_init(&po->bind_lock); mutex_init(&po->pg_vec_lock); po->rollover = NULL; po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; if (proto) { po->prot_hook.type = proto; register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); sk_add_node_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); preempt_enable(); return 0; out2: sk_free(sk); out: return err; } /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. */ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; int vnet_hdr_len = 0; unsigned int origlen = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? */ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = sock_recv_errqueue(sk, msg, len, SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if the device has just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking, we don't have to see or worry about * blocking retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->pressure) packet_rcv_has_room(pkt_sk(sk), NULL); if (pkt_sk(sk)->has_vnet_hdr) { err = packet_rcv_vnet(msg, skb, &len); if (err) goto out_free; vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave.
If it worries * a user program they can ask the device for its MTU * anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; if (sock->type != SOCK_PACKET) { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { /* If the address length field is there to be filled * in, we fill it in now. */ if (sock->type == SOCK_PACKET) { __sockaddr_check_size(sizeof(struct sockaddr_pkt)); msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); } if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (skb_vlan_tag_present(skb)) { aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist_delete(struct net_device *dev, struct packet_mclist **mlp) { struct packet_mclist *ml; while ((ml = *mlp) != NULL) { if (ml->ifindex == dev->ifindex) { packet_dev_mc(dev, ml, -1); *mlp = ml->next; kfree(ml); } else mlp = &ml->next; } } static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml, *i; struct net_device *dev; int err; rtnl_lock(); err = -ENODEV; dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); if (!dev) goto done; err = -EINVAL; if (mreq->mr_alen > dev->addr_len) goto done; err = -ENOBUFS; i = kmalloc(sizeof(*i), GFP_KERNEL); if (i == NULL) goto done; err = 0; for (ml = po->mclist; ml; ml = ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { ml->count++; /* Free the new element ... 
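 * since an identical membership entry already exists and we only
 * bumped its refcount above.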
*/ kfree(i); goto done; } } i->type = mreq->mr_type; i->ifindex = mreq->mr_ifindex; i->alen = mreq->mr_alen; memcpy(i->addr, mreq->mr_address, i->alen); memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); i->count = 1; i->next = po->mclist; po->mclist = i; err = packet_dev_mc(dev, i, 1); if (err) { po->mclist = i->next; kfree(i); } done: rtnl_unlock(); return err; } static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_mclist *ml, **mlp; rtnl_lock(); for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev) packet_dev_mc(dev, ml, -1); kfree(ml); } break; } } rtnl_unlock(); return 0; } static void packet_flush_mclist(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml; if (!po->mclist) return; rtnl_lock(); while ((ml = po->mclist) != NULL) { struct net_device *dev; po->mclist = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_reserve = val; ret = 0; } release_sock(sk); return ret; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; 
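/* tp_loss selects the TX error policy used by tpacket_snd(): when
 * set, a frame that fails to build is handed back to user-space as
 * TP_STATUS_AVAILABLE and transmission continues with the next slot;
 * when clear, the slot is marked TP_STATUS_WRONG_FORMAT and the send
 * loop aborts.
 */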
po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: spin_lock_bh(&sk->sk_receive_queue.lock); memcpy(&st, &po->stats, sizeof(st)); memset(&po->stats, 0, sizeof(po->stats)); spin_unlock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { lv = sizeof(struct tpacket_stats_v3); st.stats3.tp_packets += st.stats3.tp_drops; data = &st.stats3; } else { lv = sizeof(struct tpacket_stats); st.stats1.tp_packets += st.stats1.tp_drops; data = &st.stats1; } break; case PACKET_AUXDATA: val = po->auxdata; break; case PACKET_ORIGDEV: val = po->origdev; break; case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; case PACKET_VERSION: val = po->tp_version; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (len < sizeof(int)) return -EINVAL; if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; case TPACKET_V3: val = sizeof(struct tpacket3_hdr); break; default: return -EINVAL; } break; case PACKET_RESERVE: val = po->tp_reserve; break; case PACKET_LOSS: val = po->tp_loss; break; case PACKET_TIMESTAMP: val = po->tp_tstamp; break; case PACKET_FANOUT: val = (po->fanout ? 
((u32)po->fanout->id | ((u32)po->fanout->type << 16) | ((u32)po->fanout->flags << 24)) : 0); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; rstats.tp_all = atomic_long_read(&po->rollover->num); rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); data = &rstats; lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; break; case PACKET_QDISC_BYPASS: val = packet_use_direct_xmit(po); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static int compat_packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct packet_sock *po = pkt_sk(sock->sk); if (level != SOL_PACKET) return -ENOPROTOOPT; if (optname == PACKET_FANOUT_DATA && po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { optval = (char __user *)get_compat_bpf_fprog(optval); if (!optval) return -EFAULT; optlen = sizeof(struct sock_fprog); } return packet_setsockopt(sock, level, optname, optval, optlen); } #endif static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { struct sock *sk; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __unregister_prot_hook(sk, false); sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num) register_prot_hook(sk); spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, 
TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? Well, I still did not learn a better way to account * for user mmaps. */ static void packet_mm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_inc(&pkt_sk(sk)->mapped); } static void packet_mm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_dec(&pkt_sk(sk)->mapped); } static const struct vm_operations_struct packet_mmap_ops = { .open = packet_mm_open, .close = packet_mm_close, }; static void free_pg_vec(struct pgv *pg_vec, unsigned int order, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { if (is_vmalloc_addr(pg_vec[i].buffer)) vfree(pg_vec[i].buffer); else free_pages((unsigned long)pg_vec[i].buffer, order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* __get_free_pages failed, fall back to vmalloc */ buffer = vzalloc((1 << order) * PAGE_SIZE); if (buffer) return buffer; /* vmalloc failed, let's dig into swap here */ gfp_flags &= ~__GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* complete and utter failure */ return NULL; } static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; struct pgv *pg_vec; int i; pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); if (unlikely(!pg_vec)) goto out; for (i = 0; i < block_nr; i++) { pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } out: return pg_vec; out_free_pgvec: free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; lock_sock(sk); rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ?
&sk->sk_write_queue : &sk->sk_receive_queue; err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (packet_read_pending(rb)) goto out; } if (req->tp_block_nr) { /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; case TPACKET_V3: po->tp_hdrlen = TPACKET3_HDRLEN; break; } err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && req->tp_block_size <= BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out; rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V3: /* Block transmit is not supported yet */ if (!tx_ring) { init_prb_bdqc(po, rb, pg_vec, req_u); } else { struct tpacket_req3 *req3 = &req_u->req3; if (req3->tp_retire_blk_tov || req3->tp_sizeof_priv || req3->tp_feature_req_word) { err = -EINVAL; goto out; } } break; default: break; } } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; } /* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { po->num = 0; __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock); synchronize_net(); err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? 
tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock); spin_lock(&po->bind_lock); if (was_running) { po->num = num; register_prot_hook(sk); } spin_unlock(&po->bind_lock); if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); } if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: release_sock(sk); return err; } static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned long size, expected_size; struct packet_ring_buffer *rb; unsigned long start; int err = -EINVAL; int i; if (vma->vm_pgoff) return -EINVAL; mutex_lock(&po->pg_vec_lock); expected_size = 0; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec) { expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE; } } if (expected_size == 0) goto out; size = vma->vm_end - vma->vm_start; if (size != expected_size) goto out; start = vma->vm_start; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec == NULL) continue; for (i = 0; i < rb->pg_vec_len; i++) { struct page *page; void *kaddr = rb->pg_vec[i].buffer; int pg_num; for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { page = pgv_to_page(kaddr); err = vm_insert_page(vma, start, page); if (unlikely(err)) goto out; start += PAGE_SIZE; kaddr += PAGE_SIZE; } } } atomic_inc(&po->mapped); vma->vm_ops = &packet_mmap_ops; err = 0; out: mutex_unlock(&po->pg_vec_lock); return err; } static const struct proto_ops packet_ops_spkt = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind_spkt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_packet_setsockopt, #endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family packet_family_ops = { .family = PF_PACKET, .create = packet_create, .owner = THIS_MODULE, }; static struct notifier_block packet_netdev_notifier = { .notifier_call = packet_notifier, }; #ifdef CONFIG_PROC_FS static void *packet_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); rcu_read_lock(); return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); } static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); return seq_hlist_next_rcu(v, &net->packet.sklist, pos); } static void packet_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } 
static int packet_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); else { struct sock *s = sk_entry(v); const struct packet_sock *po = pkt_sk(s); seq_printf(seq, "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, refcount_read(&s->sk_refcnt), s->sk_type, ntohs(po->num), po->ifindex, po->running, atomic_read(&s->sk_rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), sock_i_ino(s)); } return 0; } static const struct seq_operations packet_seq_ops = { .start = packet_seq_start, .next = packet_seq_next, .stop = packet_seq_stop, .show = packet_seq_show, }; static int packet_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &packet_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations packet_seq_fops = { .owner = THIS_MODULE, .open = packet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init packet_net_init(struct net *net) { mutex_init(&net->packet.sklist_lock); INIT_HLIST_HEAD(&net->packet.sklist); if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) return -ENOMEM; return 0; } static void __net_exit packet_net_exit(struct net *net) { remove_proc_entry("packet", net->proc_net); } static struct pernet_operations packet_net_ops = { .init = packet_net_init, .exit = packet_net_exit, }; static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); } static int __init packet_init(void) { int rc = proto_register(&packet_proto, 0); if (rc != 0) goto out; sock_register(&packet_family_ops); register_pernet_subsys(&packet_net_ops); register_netdevice_notifier(&packet_netdev_notifier); out: return rc; } module_init(packet_init); module_exit(packet_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PACKET);
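/*
 * Editor's addition (not part of the kernel source above): a minimal,
 * hedged userspace sketch of the ring-buffer path this file implements --
 * packet_setsockopt() with PACKET_VERSION and PACKET_RX_RING, followed by
 * packet_mmap(). The ring geometry below is an illustrative assumption
 * chosen only to satisfy the checks in packet_set_ring(); real programs
 * size rings to their traffic. Requires CAP_NET_RAW.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int setup_rx_ring(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int ver = TPACKET_V2;
	struct tpacket_req req;
	void *ring;

	if (fd == -1)
		return -1;
	/* PACKET_VERSION must be set before a ring exists (else -EBUSY above). */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) == -1)
		goto err;
	/* Geometry must pass packet_set_ring(): page-aligned block size,
	 * frame size aligned to TPACKET_ALIGNMENT, and
	 * tp_frame_nr == frames_per_block * tp_block_nr. */
	memset(&req, 0, sizeof(req));
	req.tp_block_size = 4096;
	req.tp_frame_size = 2048;
	req.tp_block_nr = 64;
	req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) == -1)
		goto err;
	/* One contiguous mapping covering every block, served by packet_mmap().
	 * The mapping stays valid for the life of the socket. */
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}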
./CrossVul/dataset_final_sorted/CWE-119/c/good_2796_0
crossvul-cpp_data_bad_3615_0
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int 
cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; 
i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran off the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto
unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { 
CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); 
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3615_0
crossvul-cpp_data_good_5493_2
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * The purpose of this test is to minimally exercise libcurl's internal * curl_m*printf formatting capabilities and handling of some data types. */ #include "test.h" #include "memdebug.h" #if (CURL_SIZEOF_CURL_OFF_T > CURL_SIZEOF_LONG) # define MPRNT_SUFFIX_CURL_OFF_T LL #else # define MPRNT_SUFFIX_CURL_OFF_T L #endif #ifdef CURL_ISOCPP # define MPRNT_OFF_T_C_HELPER2(Val,Suffix) Val ## Suffix #else # define MPRNT_OFF_T_C_HELPER2(Val,Suffix) Val/**/Suffix #endif #define MPRNT_OFF_T_C_HELPER1(Val,Suffix) MPRNT_OFF_T_C_HELPER2(Val,Suffix) #define MPRNT_OFF_T_C(Val) MPRNT_OFF_T_C_HELPER1(Val,MPRNT_SUFFIX_CURL_OFF_T) #define BUFSZ 256 #define USHORT_TESTS_ARRSZ 1 + 100 #define SSHORT_TESTS_ARRSZ 1 + 100 #define UINT_TESTS_ARRSZ 1 + 100 #define SINT_TESTS_ARRSZ 1 + 100 #define ULONG_TESTS_ARRSZ 1 + 100 #define SLONG_TESTS_ARRSZ 1 + 100 #define COFFT_TESTS_ARRSZ 1 + 100 struct unsshort_st { unsigned short num; /* unsigned short */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct sigshort_st { short num; /* signed short */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct unsint_st { unsigned int num; /* unsigned int */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct sigint_st { int num; /* signed int */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct unslong_st { unsigned long num; /* unsigned long */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct siglong_st { long num; /* signed long */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; struct curloff_st { curl_off_t num; /* curl_off_t */ const char *expected; /* expected string */ char result[BUFSZ]; /* result string */ }; static struct unsshort_st us_test[USHORT_TESTS_ARRSZ]; static struct sigshort_st ss_test[SSHORT_TESTS_ARRSZ]; static struct unsint_st ui_test[UINT_TESTS_ARRSZ]; static struct sigint_st si_test[SINT_TESTS_ARRSZ]; static struct unslong_st ul_test[ULONG_TESTS_ARRSZ]; static struct siglong_st sl_test[SLONG_TESTS_ARRSZ]; static struct curloff_st co_test[COFFT_TESTS_ARRSZ]; static int test_unsigned_short_formatting(void) { int i, j; int num_ushort_tests; int failed = 0; #if (SIZEOF_SHORT == 1) i=1; us_test[i].num = 0xFFU; us_test[i].expected = "256"; i++; us_test[i].num = 0xF0U; us_test[i].expected = "240"; i++; us_test[i].num = 0x0FU; us_test[i].expected = "15"; i++; us_test[i].num = 0xE0U; us_test[i].expected = "224"; i++; us_test[i].num = 0x0EU; us_test[i].expected = "14"; i++; us_test[i].num = 0xC0U; us_test[i].expected 
= "192"; i++; us_test[i].num = 0x0CU; us_test[i].expected = "12"; i++; us_test[i].num = 0x01U; us_test[i].expected = "1"; i++; us_test[i].num = 0x00U; us_test[i].expected = "0"; num_ushort_tests = i; #elif (SIZEOF_SHORT == 2) i=1; us_test[i].num = 0xFFFFU; us_test[i].expected = "65535"; i++; us_test[i].num = 0xFF00U; us_test[i].expected = "65280"; i++; us_test[i].num = 0x00FFU; us_test[i].expected = "255"; i++; us_test[i].num = 0xF000U; us_test[i].expected = "61440"; i++; us_test[i].num = 0x0F00U; us_test[i].expected = "3840"; i++; us_test[i].num = 0x00F0U; us_test[i].expected = "240"; i++; us_test[i].num = 0x000FU; us_test[i].expected = "15"; i++; us_test[i].num = 0xC000U; us_test[i].expected = "49152"; i++; us_test[i].num = 0x0C00U; us_test[i].expected = "3072"; i++; us_test[i].num = 0x00C0U; us_test[i].expected = "192"; i++; us_test[i].num = 0x000CU; us_test[i].expected = "12"; i++; us_test[i].num = 0x0001U; us_test[i].expected = "1"; i++; us_test[i].num = 0x0000U; us_test[i].expected = "0"; num_ushort_tests = i; #elif (SIZEOF_SHORT == 4) i=1; us_test[i].num = 0xFFFFFFFFU; us_test[i].expected = "4294967295"; i++; us_test[i].num = 0xFFFF0000U; us_test[i].expected = "4294901760"; i++; us_test[i].num = 0x0000FFFFU; us_test[i].expected = "65535"; i++; us_test[i].num = 0xFF000000U; us_test[i].expected = "4278190080"; i++; us_test[i].num = 0x00FF0000U; us_test[i].expected = "16711680"; i++; us_test[i].num = 0x0000FF00U; us_test[i].expected = "65280"; i++; us_test[i].num = 0x000000FFU; us_test[i].expected = "255"; i++; us_test[i].num = 0xF0000000U; us_test[i].expected = "4026531840"; i++; us_test[i].num = 0x0F000000U; us_test[i].expected = "251658240"; i++; us_test[i].num = 0x00F00000U; us_test[i].expected = "15728640"; i++; us_test[i].num = 0x000F0000U; us_test[i].expected = "983040"; i++; us_test[i].num = 0x0000F000U; us_test[i].expected = "61440"; i++; us_test[i].num = 0x00000F00U; us_test[i].expected = "3840"; i++; us_test[i].num = 0x000000F0U; us_test[i].expected = "240"; i++; us_test[i].num = 0x0000000FU; us_test[i].expected = "15"; i++; us_test[i].num = 0xC0000000U; us_test[i].expected = "3221225472"; i++; us_test[i].num = 0x0C000000U; us_test[i].expected = "201326592"; i++; us_test[i].num = 0x00C00000U; us_test[i].expected = "12582912"; i++; us_test[i].num = 0x000C0000U; us_test[i].expected = "786432"; i++; us_test[i].num = 0x0000C000U; us_test[i].expected = "49152"; i++; us_test[i].num = 0x00000C00U; us_test[i].expected = "3072"; i++; us_test[i].num = 0x000000C0U; us_test[i].expected = "192"; i++; us_test[i].num = 0x0000000CU; us_test[i].expected = "12"; i++; us_test[i].num = 0x00000001U; us_test[i].expected = "1"; i++; us_test[i].num = 0x00000000U; us_test[i].expected = "0"; num_ushort_tests = i; #endif for(i=1; i<=num_ushort_tests; i++) { for(j=0; j<BUFSZ; j++) us_test[i].result[j] = 'X'; us_test[i].result[BUFSZ-1] = '\0'; (void)curl_msprintf(us_test[i].result, "%hu", us_test[i].num); if(memcmp(us_test[i].result, us_test[i].expected, strlen(us_test[i].expected))) { printf("unsigned short test #%.2d: Failed (Expected: %s Got: %s)\n", i, us_test[i].expected, us_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() unsigned short tests OK!\n"); else printf("Some curl_mprintf() unsigned short tests Failed!\n"); return failed; } static int test_signed_short_formatting(void) { int i, j; int num_sshort_tests; int failed = 0; #if (SIZEOF_SHORT == 1) i=1; ss_test[i].num = 0x7F; ss_test[i].expected = "127"; i++; ss_test[i].num = 0x70; ss_test[i].expected = "112"; i++; 
ss_test[i].num = 0x07; ss_test[i].expected = "7"; i++; ss_test[i].num = 0x50; ss_test[i].expected = "80"; i++; ss_test[i].num = 0x05; ss_test[i].expected = "5"; i++; ss_test[i].num = 0x01; ss_test[i].expected = "1"; i++; ss_test[i].num = 0x00; ss_test[i].expected = "0"; i++; ss_test[i].num = -0x7F -1; ss_test[i].expected = "-128"; i++; ss_test[i].num = -0x70 -1; ss_test[i].expected = "-113"; i++; ss_test[i].num = -0x07 -1; ss_test[i].expected = "-8"; i++; ss_test[i].num = -0x50 -1; ss_test[i].expected = "-81"; i++; ss_test[i].num = -0x05 -1; ss_test[i].expected = "-6"; i++; ss_test[i].num = 0x00 -1; ss_test[i].expected = "-1"; num_sshort_tests = i; #elif (SIZEOF_SHORT == 2) i=1; ss_test[i].num = 0x7FFF; ss_test[i].expected = "32767"; i++; ss_test[i].num = 0x7FFE; ss_test[i].expected = "32766"; i++; ss_test[i].num = 0x7FFD; ss_test[i].expected = "32765"; i++; ss_test[i].num = 0x7F00; ss_test[i].expected = "32512"; i++; ss_test[i].num = 0x07F0; ss_test[i].expected = "2032"; i++; ss_test[i].num = 0x007F; ss_test[i].expected = "127"; i++; ss_test[i].num = 0x7000; ss_test[i].expected = "28672"; i++; ss_test[i].num = 0x0700; ss_test[i].expected = "1792"; i++; ss_test[i].num = 0x0070; ss_test[i].expected = "112"; i++; ss_test[i].num = 0x0007; ss_test[i].expected = "7"; i++; ss_test[i].num = 0x5000; ss_test[i].expected = "20480"; i++; ss_test[i].num = 0x0500; ss_test[i].expected = "1280"; i++; ss_test[i].num = 0x0050; ss_test[i].expected = "80"; i++; ss_test[i].num = 0x0005; ss_test[i].expected = "5"; i++; ss_test[i].num = 0x0001; ss_test[i].expected = "1"; i++; ss_test[i].num = 0x0000; ss_test[i].expected = "0"; i++; ss_test[i].num = -0x7FFF -1; ss_test[i].expected = "-32768"; i++; ss_test[i].num = -0x7FFE -1; ss_test[i].expected = "-32767"; i++; ss_test[i].num = -0x7FFD -1; ss_test[i].expected = "-32766"; i++; ss_test[i].num = -0x7F00 -1; ss_test[i].expected = "-32513"; i++; ss_test[i].num = -0x07F0 -1; ss_test[i].expected = "-2033"; i++; ss_test[i].num = -0x007F -1; ss_test[i].expected = "-128"; i++; ss_test[i].num = -0x7000 -1; ss_test[i].expected = "-28673"; i++; ss_test[i].num = -0x0700 -1; ss_test[i].expected = "-1793"; i++; ss_test[i].num = -0x0070 -1; ss_test[i].expected = "-113"; i++; ss_test[i].num = -0x0007 -1; ss_test[i].expected = "-8"; i++; ss_test[i].num = -0x5000 -1; ss_test[i].expected = "-20481"; i++; ss_test[i].num = -0x0500 -1; ss_test[i].expected = "-1281"; i++; ss_test[i].num = -0x0050 -1; ss_test[i].expected = "-81"; i++; ss_test[i].num = -0x0005 -1; ss_test[i].expected = "-6"; i++; ss_test[i].num = 0x0000 -1; ss_test[i].expected = "-1"; num_sshort_tests = i; #elif (SIZEOF_SHORT == 4) i=1; ss_test[i].num = 0x7FFFFFFF; ss_test[i].expected = "2147483647"; i++; ss_test[i].num = 0x7FFFFFFE; ss_test[i].expected = "2147483646"; i++; ss_test[i].num = 0x7FFFFFFD; ss_test[i].expected = "2147483645"; i++; ss_test[i].num = 0x7FFF0000; ss_test[i].expected = "2147418112"; i++; ss_test[i].num = 0x00007FFF; ss_test[i].expected = "32767"; i++; ss_test[i].num = 0x7F000000; ss_test[i].expected = "2130706432"; i++; ss_test[i].num = 0x007F0000; ss_test[i].expected = "8323072"; i++; ss_test[i].num = 0x00007F00; ss_test[i].expected = "32512"; i++; ss_test[i].num = 0x0000007F; ss_test[i].expected = "127"; i++; ss_test[i].num = 0x70000000; ss_test[i].expected = "1879048192"; i++; ss_test[i].num = 0x07000000; ss_test[i].expected = "117440512"; i++; ss_test[i].num = 0x00700000; ss_test[i].expected = "7340032"; i++; ss_test[i].num = 0x00070000; ss_test[i].expected = "458752"; i++; ss_test[i].num = 
0x00007000; ss_test[i].expected = "28672"; i++; ss_test[i].num = 0x00000700; ss_test[i].expected = "1792"; i++; ss_test[i].num = 0x00000070; ss_test[i].expected = "112"; i++; ss_test[i].num = 0x00000007; ss_test[i].expected = "7"; i++; ss_test[i].num = 0x50000000; ss_test[i].expected = "1342177280"; i++; ss_test[i].num = 0x05000000; ss_test[i].expected = "83886080"; i++; ss_test[i].num = 0x00500000; ss_test[i].expected = "5242880"; i++; ss_test[i].num = 0x00050000; ss_test[i].expected = "327680"; i++; ss_test[i].num = 0x00005000; ss_test[i].expected = "20480"; i++; ss_test[i].num = 0x00000500; ss_test[i].expected = "1280"; i++; ss_test[i].num = 0x00000050; ss_test[i].expected = "80"; i++; ss_test[i].num = 0x00000005; ss_test[i].expected = "5"; i++; ss_test[i].num = 0x00000001; ss_test[i].expected = "1"; i++; ss_test[i].num = 0x00000000; ss_test[i].expected = "0"; i++; ss_test[i].num = -0x7FFFFFFF -1; ss_test[i].expected = "-2147483648"; i++; ss_test[i].num = -0x7FFFFFFE -1; ss_test[i].expected = "-2147483647"; i++; ss_test[i].num = -0x7FFFFFFD -1; ss_test[i].expected = "-2147483646"; i++; ss_test[i].num = -0x7FFF0000 -1; ss_test[i].expected = "-2147418113"; i++; ss_test[i].num = -0x00007FFF -1; ss_test[i].expected = "-32768"; i++; ss_test[i].num = -0x7F000000 -1; ss_test[i].expected = "-2130706433"; i++; ss_test[i].num = -0x007F0000 -1; ss_test[i].expected = "-8323073"; i++; ss_test[i].num = -0x00007F00 -1; ss_test[i].expected = "-32513"; i++; ss_test[i].num = -0x0000007F -1; ss_test[i].expected = "-128"; i++; ss_test[i].num = -0x70000000 -1; ss_test[i].expected = "-1879048193"; i++; ss_test[i].num = -0x07000000 -1; ss_test[i].expected = "-117440513"; i++; ss_test[i].num = -0x00700000 -1; ss_test[i].expected = "-7340033"; i++; ss_test[i].num = -0x00070000 -1; ss_test[i].expected = "-458753"; i++; ss_test[i].num = -0x00007000 -1; ss_test[i].expected = "-28673"; i++; ss_test[i].num = -0x00000700 -1; ss_test[i].expected = "-1793"; i++; ss_test[i].num = -0x00000070 -1; ss_test[i].expected = "-113"; i++; ss_test[i].num = -0x00000007 -1; ss_test[i].expected = "-8"; i++; ss_test[i].num = -0x50000000 -1; ss_test[i].expected = "-1342177281"; i++; ss_test[i].num = -0x05000000 -1; ss_test[i].expected = "-83886081"; i++; ss_test[i].num = -0x00500000 -1; ss_test[i].expected = "-5242881"; i++; ss_test[i].num = -0x00050000 -1; ss_test[i].expected = "-327681"; i++; ss_test[i].num = -0x00005000 -1; ss_test[i].expected = "-20481"; i++; ss_test[i].num = -0x00000500 -1; ss_test[i].expected = "-1281"; i++; ss_test[i].num = -0x00000050 -1; ss_test[i].expected = "-81"; i++; ss_test[i].num = -0x00000005 -1; ss_test[i].expected = "-6"; i++; ss_test[i].num = 0x00000000 -1; ss_test[i].expected = "-1"; num_sshort_tests = i; #endif for(i=1; i<=num_sshort_tests; i++) { for(j=0; j<BUFSZ; j++) ss_test[i].result[j] = 'X'; ss_test[i].result[BUFSZ-1] = '\0'; (void)curl_msprintf(ss_test[i].result, "%hd", ss_test[i].num); if(memcmp(ss_test[i].result, ss_test[i].expected, strlen(ss_test[i].expected))) { printf("signed short test #%.2d: Failed (Expected: %s Got: %s)\n", i, ss_test[i].expected, ss_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() signed short tests OK!\n"); else printf("Some curl_mprintf() signed short tests Failed!\n"); return failed; } static int test_unsigned_int_formatting(void) { int i, j; int num_uint_tests; int failed = 0; #if (SIZEOF_INT == 2) i=1; ui_test[i].num = 0xFFFFU; ui_test[i].expected = "65535"; i++; ui_test[i].num = 0xFF00U; ui_test[i].expected = "65280"; i++; ui_test[i].num 
= 0x00FFU; ui_test[i].expected = "255"; i++; ui_test[i].num = 0xF000U; ui_test[i].expected = "61440"; i++; ui_test[i].num = 0x0F00U; ui_test[i].expected = "3840"; i++; ui_test[i].num = 0x00F0U; ui_test[i].expected = "240"; i++; ui_test[i].num = 0x000FU; ui_test[i].expected = "15"; i++; ui_test[i].num = 0xC000U; ui_test[i].expected = "49152"; i++; ui_test[i].num = 0x0C00U; ui_test[i].expected = "3072"; i++; ui_test[i].num = 0x00C0U; ui_test[i].expected = "192"; i++; ui_test[i].num = 0x000CU; ui_test[i].expected = "12"; i++; ui_test[i].num = 0x0001U; ui_test[i].expected = "1"; i++; ui_test[i].num = 0x0000U; ui_test[i].expected = "0"; num_uint_tests = i; #elif (SIZEOF_INT == 4) i=1; ui_test[i].num = 0xFFFFFFFFU; ui_test[i].expected = "4294967295"; i++; ui_test[i].num = 0xFFFF0000U; ui_test[i].expected = "4294901760"; i++; ui_test[i].num = 0x0000FFFFU; ui_test[i].expected = "65535"; i++; ui_test[i].num = 0xFF000000U; ui_test[i].expected = "4278190080"; i++; ui_test[i].num = 0x00FF0000U; ui_test[i].expected = "16711680"; i++; ui_test[i].num = 0x0000FF00U; ui_test[i].expected = "65280"; i++; ui_test[i].num = 0x000000FFU; ui_test[i].expected = "255"; i++; ui_test[i].num = 0xF0000000U; ui_test[i].expected = "4026531840"; i++; ui_test[i].num = 0x0F000000U; ui_test[i].expected = "251658240"; i++; ui_test[i].num = 0x00F00000U; ui_test[i].expected = "15728640"; i++; ui_test[i].num = 0x000F0000U; ui_test[i].expected = "983040"; i++; ui_test[i].num = 0x0000F000U; ui_test[i].expected = "61440"; i++; ui_test[i].num = 0x00000F00U; ui_test[i].expected = "3840"; i++; ui_test[i].num = 0x000000F0U; ui_test[i].expected = "240"; i++; ui_test[i].num = 0x0000000FU; ui_test[i].expected = "15"; i++; ui_test[i].num = 0xC0000000U; ui_test[i].expected = "3221225472"; i++; ui_test[i].num = 0x0C000000U; ui_test[i].expected = "201326592"; i++; ui_test[i].num = 0x00C00000U; ui_test[i].expected = "12582912"; i++; ui_test[i].num = 0x000C0000U; ui_test[i].expected = "786432"; i++; ui_test[i].num = 0x0000C000U; ui_test[i].expected = "49152"; i++; ui_test[i].num = 0x00000C00U; ui_test[i].expected = "3072"; i++; ui_test[i].num = 0x000000C0U; ui_test[i].expected = "192"; i++; ui_test[i].num = 0x0000000CU; ui_test[i].expected = "12"; i++; ui_test[i].num = 0x00000001U; ui_test[i].expected = "1"; i++; ui_test[i].num = 0x00000000U; ui_test[i].expected = "0"; num_uint_tests = i; #elif (SIZEOF_INT == 8) /* !checksrc! 
disable LONGLINE all */ i=1; ui_test[i].num = 0xFFFFFFFFFFFFFFFFU; ui_test[i].expected = "18446744073709551615"; i++; ui_test[i].num = 0xFFFFFFFF00000000U; ui_test[i].expected = "18446744069414584320"; i++; ui_test[i].num = 0x00000000FFFFFFFFU; ui_test[i].expected = "4294967295"; i++; ui_test[i].num = 0xFFFF000000000000U; ui_test[i].expected = "18446462598732840960"; i++; ui_test[i].num = 0x0000FFFF00000000U; ui_test[i].expected = "281470681743360"; i++; ui_test[i].num = 0x00000000FFFF0000U; ui_test[i].expected = "4294901760"; i++; ui_test[i].num = 0x000000000000FFFFU; ui_test[i].expected = "65535"; i++; ui_test[i].num = 0xFF00000000000000U; ui_test[i].expected = "18374686479671623680"; i++; ui_test[i].num = 0x00FF000000000000U; ui_test[i].expected = "71776119061217280"; i++; ui_test[i].num = 0x0000FF0000000000U; ui_test[i].expected = "280375465082880"; i++; ui_test[i].num = 0x000000FF00000000U; ui_test[i].expected = "1095216660480"; i++; ui_test[i].num = 0x00000000FF000000U; ui_test[i].expected = "4278190080"; i++; ui_test[i].num = 0x0000000000FF0000U; ui_test[i].expected = "16711680"; i++; ui_test[i].num = 0x000000000000FF00U; ui_test[i].expected = "65280"; i++; ui_test[i].num = 0x00000000000000FFU; ui_test[i].expected = "255"; i++; ui_test[i].num = 0xF000000000000000U; ui_test[i].expected = "17293822569102704640"; i++; ui_test[i].num = 0x0F00000000000000U; ui_test[i].expected = "1080863910568919040"; i++; ui_test[i].num = 0x00F0000000000000U; ui_test[i].expected = "67553994410557440"; i++; ui_test[i].num = 0x000F000000000000U; ui_test[i].expected = "4222124650659840"; i++; ui_test[i].num = 0x0000F00000000000U; ui_test[i].expected = "263882790666240"; i++; ui_test[i].num = 0x00000F0000000000U; ui_test[i].expected = "16492674416640"; i++; ui_test[i].num = 0x000000F000000000U; ui_test[i].expected = "1030792151040"; i++; ui_test[i].num = 0x0000000F00000000U; ui_test[i].expected = "64424509440"; i++; ui_test[i].num = 0x00000000F0000000U; ui_test[i].expected = "4026531840"; i++; ui_test[i].num = 0x000000000F000000U; ui_test[i].expected = "251658240"; i++; ui_test[i].num = 0x0000000000F00000U; ui_test[i].expected = "15728640"; i++; ui_test[i].num = 0x00000000000F0000U; ui_test[i].expected = "983040"; i++; ui_test[i].num = 0x000000000000F000U; ui_test[i].expected = "61440"; i++; ui_test[i].num = 0x0000000000000F00U; ui_test[i].expected = "3840"; i++; ui_test[i].num = 0x00000000000000F0U; ui_test[i].expected = "240"; i++; ui_test[i].num = 0x000000000000000FU; ui_test[i].expected = "15"; i++; ui_test[i].num = 0xC000000000000000U; ui_test[i].expected = "13835058055282163712"; i++; ui_test[i].num = 0x0C00000000000000U; ui_test[i].expected = "864691128455135232"; i++; ui_test[i].num = 0x00C0000000000000U; ui_test[i].expected = "54043195528445952"; i++; ui_test[i].num = 0x000C000000000000U; ui_test[i].expected = "3377699720527872"; i++; ui_test[i].num = 0x0000C00000000000U; ui_test[i].expected = "211106232532992"; i++; ui_test[i].num = 0x00000C0000000000U; ui_test[i].expected = "13194139533312"; i++; ui_test[i].num = 0x000000C000000000U; ui_test[i].expected = "824633720832"; i++; ui_test[i].num = 0x0000000C00000000U; ui_test[i].expected = "51539607552"; i++; ui_test[i].num = 0x00000000C0000000U; ui_test[i].expected = "3221225472"; i++; ui_test[i].num = 0x000000000C000000U; ui_test[i].expected = "201326592"; i++; ui_test[i].num = 0x0000000000C00000U; ui_test[i].expected = "12582912"; i++; ui_test[i].num = 0x00000000000C0000U; ui_test[i].expected = "786432"; i++; ui_test[i].num = 0x000000000000C000U; 
ui_test[i].expected = "49152"; i++; ui_test[i].num = 0x0000000000000C00U; ui_test[i].expected = "3072"; i++; ui_test[i].num = 0x00000000000000C0U; ui_test[i].expected = "192"; i++; ui_test[i].num = 0x000000000000000CU; ui_test[i].expected = "12"; i++; ui_test[i].num = 0x00000001U; ui_test[i].expected = "1"; i++; ui_test[i].num = 0x00000000U; ui_test[i].expected = "0"; num_uint_tests = i; #endif for(i=1; i<=num_uint_tests; i++) { for(j=0; j<BUFSZ; j++) ui_test[i].result[j] = 'X'; ui_test[i].result[BUFSZ-1] = '\0'; (void)curl_msprintf(ui_test[i].result, "%u", ui_test[i].num); if(memcmp(ui_test[i].result, ui_test[i].expected, strlen(ui_test[i].expected))) { printf("unsigned int test #%.2d: Failed (Expected: %s Got: %s)\n", i, ui_test[i].expected, ui_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() unsigned int tests OK!\n"); else printf("Some curl_mprintf() unsigned int tests Failed!\n"); return failed; } static int test_signed_int_formatting(void) { int i, j; int num_sint_tests; int failed = 0; #if (SIZEOF_INT == 2) i=1; si_test[i].num = 0x7FFF; si_test[i].expected = "32767"; i++; si_test[i].num = 0x7FFE; si_test[i].expected = "32766"; i++; si_test[i].num = 0x7FFD; si_test[i].expected = "32765"; i++; si_test[i].num = 0x7F00; si_test[i].expected = "32512"; i++; si_test[i].num = 0x07F0; si_test[i].expected = "2032"; i++; si_test[i].num = 0x007F; si_test[i].expected = "127"; i++; si_test[i].num = 0x7000; si_test[i].expected = "28672"; i++; si_test[i].num = 0x0700; si_test[i].expected = "1792"; i++; si_test[i].num = 0x0070; si_test[i].expected = "112"; i++; si_test[i].num = 0x0007; si_test[i].expected = "7"; i++; si_test[i].num = 0x5000; si_test[i].expected = "20480"; i++; si_test[i].num = 0x0500; si_test[i].expected = "1280"; i++; si_test[i].num = 0x0050; si_test[i].expected = "80"; i++; si_test[i].num = 0x0005; si_test[i].expected = "5"; i++; si_test[i].num = 0x0001; si_test[i].expected = "1"; i++; si_test[i].num = 0x0000; si_test[i].expected = "0"; i++; si_test[i].num = -0x7FFF -1; si_test[i].expected = "-32768"; i++; si_test[i].num = -0x7FFE -1; si_test[i].expected = "-32767"; i++; si_test[i].num = -0x7FFD -1; si_test[i].expected = "-32766"; i++; si_test[i].num = -0x7F00 -1; si_test[i].expected = "-32513"; i++; si_test[i].num = -0x07F0 -1; si_test[i].expected = "-2033"; i++; si_test[i].num = -0x007F -1; si_test[i].expected = "-128"; i++; si_test[i].num = -0x7000 -1; si_test[i].expected = "-28673"; i++; si_test[i].num = -0x0700 -1; si_test[i].expected = "-1793"; i++; si_test[i].num = -0x0070 -1; si_test[i].expected = "-113"; i++; si_test[i].num = -0x0007 -1; si_test[i].expected = "-8"; i++; si_test[i].num = -0x5000 -1; si_test[i].expected = "-20481"; i++; si_test[i].num = -0x0500 -1; si_test[i].expected = "-1281"; i++; si_test[i].num = -0x0050 -1; si_test[i].expected = "-81"; i++; si_test[i].num = -0x0005 -1; si_test[i].expected = "-6"; i++; si_test[i].num = 0x0000 -1; si_test[i].expected = "-1"; num_sint_tests = i; #elif (SIZEOF_INT == 4) i=1; si_test[i].num = 0x7FFFFFFF; si_test[i].expected = "2147483647"; i++; si_test[i].num = 0x7FFFFFFE; si_test[i].expected = "2147483646"; i++; si_test[i].num = 0x7FFFFFFD; si_test[i].expected = "2147483645"; i++; si_test[i].num = 0x7FFF0000; si_test[i].expected = "2147418112"; i++; si_test[i].num = 0x00007FFF; si_test[i].expected = "32767"; i++; si_test[i].num = 0x7F000000; si_test[i].expected = "2130706432"; i++; si_test[i].num = 0x007F0000; si_test[i].expected = "8323072"; i++; si_test[i].num = 0x00007F00; si_test[i].expected = 
"32512"; i++; si_test[i].num = 0x0000007F; si_test[i].expected = "127"; i++; si_test[i].num = 0x70000000; si_test[i].expected = "1879048192"; i++; si_test[i].num = 0x07000000; si_test[i].expected = "117440512"; i++; si_test[i].num = 0x00700000; si_test[i].expected = "7340032"; i++; si_test[i].num = 0x00070000; si_test[i].expected = "458752"; i++; si_test[i].num = 0x00007000; si_test[i].expected = "28672"; i++; si_test[i].num = 0x00000700; si_test[i].expected = "1792"; i++; si_test[i].num = 0x00000070; si_test[i].expected = "112"; i++; si_test[i].num = 0x00000007; si_test[i].expected = "7"; i++; si_test[i].num = 0x50000000; si_test[i].expected = "1342177280"; i++; si_test[i].num = 0x05000000; si_test[i].expected = "83886080"; i++; si_test[i].num = 0x00500000; si_test[i].expected = "5242880"; i++; si_test[i].num = 0x00050000; si_test[i].expected = "327680"; i++; si_test[i].num = 0x00005000; si_test[i].expected = "20480"; i++; si_test[i].num = 0x00000500; si_test[i].expected = "1280"; i++; si_test[i].num = 0x00000050; si_test[i].expected = "80"; i++; si_test[i].num = 0x00000005; si_test[i].expected = "5"; i++; si_test[i].num = 0x00000001; si_test[i].expected = "1"; i++; si_test[i].num = 0x00000000; si_test[i].expected = "0"; i++; si_test[i].num = -0x7FFFFFFF -1; si_test[i].expected = "-2147483648"; i++; si_test[i].num = -0x7FFFFFFE -1; si_test[i].expected = "-2147483647"; i++; si_test[i].num = -0x7FFFFFFD -1; si_test[i].expected = "-2147483646"; i++; si_test[i].num = -0x7FFF0000 -1; si_test[i].expected = "-2147418113"; i++; si_test[i].num = -0x00007FFF -1; si_test[i].expected = "-32768"; i++; si_test[i].num = -0x7F000000 -1; si_test[i].expected = "-2130706433"; i++; si_test[i].num = -0x007F0000 -1; si_test[i].expected = "-8323073"; i++; si_test[i].num = -0x00007F00 -1; si_test[i].expected = "-32513"; i++; si_test[i].num = -0x0000007F -1; si_test[i].expected = "-128"; i++; si_test[i].num = -0x70000000 -1; si_test[i].expected = "-1879048193"; i++; si_test[i].num = -0x07000000 -1; si_test[i].expected = "-117440513"; i++; si_test[i].num = -0x00700000 -1; si_test[i].expected = "-7340033"; i++; si_test[i].num = -0x00070000 -1; si_test[i].expected = "-458753"; i++; si_test[i].num = -0x00007000 -1; si_test[i].expected = "-28673"; i++; si_test[i].num = -0x00000700 -1; si_test[i].expected = "-1793"; i++; si_test[i].num = -0x00000070 -1; si_test[i].expected = "-113"; i++; si_test[i].num = -0x00000007 -1; si_test[i].expected = "-8"; i++; si_test[i].num = -0x50000000 -1; si_test[i].expected = "-1342177281"; i++; si_test[i].num = -0x05000000 -1; si_test[i].expected = "-83886081"; i++; si_test[i].num = -0x00500000 -1; si_test[i].expected = "-5242881"; i++; si_test[i].num = -0x00050000 -1; si_test[i].expected = "-327681"; i++; si_test[i].num = -0x00005000 -1; si_test[i].expected = "-20481"; i++; si_test[i].num = -0x00000500 -1; si_test[i].expected = "-1281"; i++; si_test[i].num = -0x00000050 -1; si_test[i].expected = "-81"; i++; si_test[i].num = -0x00000005 -1; si_test[i].expected = "-6"; i++; si_test[i].num = 0x00000000 -1; si_test[i].expected = "-1"; num_sint_tests = i; #elif (SIZEOF_INT == 8) i=1; si_test[i].num = 0x7FFFFFFFFFFFFFFF; si_test[i].expected = "9223372036854775807"; i++; si_test[i].num = 0x7FFFFFFFFFFFFFFE; si_test[i].expected = "9223372036854775806"; i++; si_test[i].num = 0x7FFFFFFFFFFFFFFD; si_test[i].expected = "9223372036854775805"; i++; si_test[i].num = 0x7FFFFFFF00000000; si_test[i].expected = "9223372032559808512"; i++; si_test[i].num = 0x000000007FFFFFFF; si_test[i].expected = 
"2147483647"; i++; si_test[i].num = 0x7FFF000000000000; si_test[i].expected = "9223090561878065152"; i++; si_test[i].num = 0x00007FFF00000000; si_test[i].expected = "140733193388032"; i++; si_test[i].num = 0x000000007FFF0000; si_test[i].expected = "2147418112"; i++; si_test[i].num = 0x0000000000007FFF; si_test[i].expected = "32767"; i++; si_test[i].num = 0x7F00000000000000; si_test[i].expected = "9151314442816847872"; i++; si_test[i].num = 0x007F000000000000; si_test[i].expected = "35747322042253312"; i++; si_test[i].num = 0x00007F0000000000; si_test[i].expected = "139637976727552"; i++; si_test[i].num = 0x0000007F00000000; si_test[i].expected = "545460846592"; i++; si_test[i].num = 0x000000007F000000; si_test[i].expected = "2130706432"; i++; si_test[i].num = 0x00000000007F0000; si_test[i].expected = "8323072"; i++; si_test[i].num = 0x0000000000007F00; si_test[i].expected = "32512"; i++; si_test[i].num = 0x000000000000007F; si_test[i].expected = "127"; i++; si_test[i].num = 0x7000000000000000; si_test[i].expected = "8070450532247928832"; i++; si_test[i].num = 0x0700000000000000; si_test[i].expected = "504403158265495552"; i++; si_test[i].num = 0x0070000000000000; si_test[i].expected = "31525197391593472"; i++; si_test[i].num = 0x0007000000000000; si_test[i].expected = "1970324836974592"; i++; si_test[i].num = 0x0000700000000000; si_test[i].expected = "123145302310912"; i++; si_test[i].num = 0x0000070000000000; si_test[i].expected = "7696581394432"; i++; si_test[i].num = 0x0000007000000000; si_test[i].expected = "481036337152"; i++; si_test[i].num = 0x0000000700000000; si_test[i].expected = "30064771072"; i++; si_test[i].num = 0x0000000070000000; si_test[i].expected = "1879048192"; i++; si_test[i].num = 0x0000000007000000; si_test[i].expected = "117440512"; i++; si_test[i].num = 0x0000000000700000; si_test[i].expected = "7340032"; i++; si_test[i].num = 0x0000000000070000; si_test[i].expected = "458752"; i++; si_test[i].num = 0x0000000000007000; si_test[i].expected = "28672"; i++; si_test[i].num = 0x0000000000000700; si_test[i].expected = "1792"; i++; si_test[i].num = 0x0000000000000070; si_test[i].expected = "112"; i++; si_test[i].num = 0x0000000000000007; si_test[i].expected = "7"; i++; si_test[i].num = 0x0000000000000001; si_test[i].expected = "1"; i++; si_test[i].num = 0x0000000000000000; si_test[i].expected = "0"; i++; si_test[i].num = -0x7FFFFFFFFFFFFFFF -1; si_test[i].expected = "-9223372036854775808"; i++; si_test[i].num = -0x7FFFFFFFFFFFFFFE -1; si_test[i].expected = "-9223372036854775807"; i++; si_test[i].num = -0x7FFFFFFFFFFFFFFD -1; si_test[i].expected = "-9223372036854775806"; i++; si_test[i].num = -0x7FFFFFFF00000000 -1; si_test[i].expected = "-9223372032559808513"; i++; si_test[i].num = -0x000000007FFFFFFF -1; si_test[i].expected = "-2147483648"; i++; si_test[i].num = -0x7FFF000000000000 -1; si_test[i].expected = "-9223090561878065153"; i++; si_test[i].num = -0x00007FFF00000000 -1; si_test[i].expected = "-140733193388033"; i++; si_test[i].num = -0x000000007FFF0000 -1; si_test[i].expected = "-2147418113"; i++; si_test[i].num = -0x0000000000007FFF -1; si_test[i].expected = "-32768"; i++; si_test[i].num = -0x7F00000000000000 -1; si_test[i].expected = "-9151314442816847873"; i++; si_test[i].num = -0x007F000000000000 -1; si_test[i].expected = "-35747322042253313"; i++; si_test[i].num = -0x00007F0000000000 -1; si_test[i].expected = "-139637976727553"; i++; si_test[i].num = -0x0000007F00000000 -1; si_test[i].expected = "-545460846593"; i++; si_test[i].num = -0x000000007F000000 -1; 
si_test[i].expected = "-2130706433"; i++; si_test[i].num = -0x00000000007F0000 -1; si_test[i].expected = "-8323073"; i++; si_test[i].num = -0x0000000000007F00 -1; si_test[i].expected = "-32513"; i++; si_test[i].num = -0x000000000000007F -1; si_test[i].expected = "-128"; i++; si_test[i].num = -0x7000000000000000 -1; si_test[i].expected = "-8070450532247928833"; i++; si_test[i].num = -0x0700000000000000 -1; si_test[i].expected = "-504403158265495553"; i++; si_test[i].num = -0x0070000000000000 -1; si_test[i].expected = "-31525197391593473"; i++; si_test[i].num = -0x0007000000000000 -1; si_test[i].expected = "-1970324836974593"; i++; si_test[i].num = -0x0000700000000000 -1; si_test[i].expected = "-123145302310913"; i++; si_test[i].num = -0x0000070000000000 -1; si_test[i].expected = "-7696581394433"; i++; si_test[i].num = -0x0000007000000000 -1; si_test[i].expected = "-481036337153"; i++; si_test[i].num = -0x0000000700000000 -1; si_test[i].expected = "-30064771073"; i++; si_test[i].num = -0x0000000070000000 -1; si_test[i].expected = "-1879048193"; i++; si_test[i].num = -0x0000000007000000 -1; si_test[i].expected = "-117440513"; i++; si_test[i].num = -0x0000000000700000 -1; si_test[i].expected = "-7340033"; i++; si_test[i].num = -0x0000000000070000 -1; si_test[i].expected = "-458753"; i++; si_test[i].num = -0x0000000000007000 -1; si_test[i].expected = "-28673"; i++; si_test[i].num = -0x0000000000000700 -1; si_test[i].expected = "-1793"; i++; si_test[i].num = -0x0000000000000070 -1; si_test[i].expected = "-113"; i++; si_test[i].num = -0x0000000000000007 -1; si_test[i].expected = "-8"; i++; si_test[i].num = 0x0000000000000000 -1; si_test[i].expected = "-1"; num_sint_tests = i; #endif for(i=1; i<=num_sint_tests; i++) { for(j=0; j<BUFSZ; j++) si_test[i].result[j] = 'X'; si_test[i].result[BUFSZ-1] = '\0'; (void)curl_msprintf(si_test[i].result, "%d", si_test[i].num); if(memcmp(si_test[i].result, si_test[i].expected, strlen(si_test[i].expected))) { printf("signed int test #%.2d: Failed (Expected: %s Got: %s)\n", i, si_test[i].expected, si_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() signed int tests OK!\n"); else printf("Some curl_mprintf() signed int tests Failed!\n"); return failed; } static int test_unsigned_long_formatting(void) { int i, j; int num_ulong_tests; int failed = 0; #if (CURL_SIZEOF_LONG == 2) i=1; ul_test[i].num = 0xFFFFUL; ul_test[i].expected = "65535"; i++; ul_test[i].num = 0xFF00UL; ul_test[i].expected = "65280"; i++; ul_test[i].num = 0x00FFUL; ul_test[i].expected = "255"; i++; ul_test[i].num = 0xF000UL; ul_test[i].expected = "61440"; i++; ul_test[i].num = 0x0F00UL; ul_test[i].expected = "3840"; i++; ul_test[i].num = 0x00F0UL; ul_test[i].expected = "240"; i++; ul_test[i].num = 0x000FUL; ul_test[i].expected = "15"; i++; ul_test[i].num = 0xC000UL; ul_test[i].expected = "49152"; i++; ul_test[i].num = 0x0C00UL; ul_test[i].expected = "3072"; i++; ul_test[i].num = 0x00C0UL; ul_test[i].expected = "192"; i++; ul_test[i].num = 0x000CUL; ul_test[i].expected = "12"; i++; ul_test[i].num = 0x0001UL; ul_test[i].expected = "1"; i++; ul_test[i].num = 0x0000UL; ul_test[i].expected = "0"; num_ulong_tests = i; #elif (CURL_SIZEOF_LONG == 4) i=1; ul_test[i].num = 0xFFFFFFFFUL; ul_test[i].expected = "4294967295"; i++; ul_test[i].num = 0xFFFF0000UL; ul_test[i].expected = "4294901760"; i++; ul_test[i].num = 0x0000FFFFUL; ul_test[i].expected = "65535"; i++; ul_test[i].num = 0xFF000000UL; ul_test[i].expected = "4278190080"; i++; ul_test[i].num = 0x00FF0000UL; ul_test[i].expected = 
"16711680"; i++; ul_test[i].num = 0x0000FF00UL; ul_test[i].expected = "65280"; i++; ul_test[i].num = 0x000000FFUL; ul_test[i].expected = "255"; i++; ul_test[i].num = 0xF0000000UL; ul_test[i].expected = "4026531840"; i++; ul_test[i].num = 0x0F000000UL; ul_test[i].expected = "251658240"; i++; ul_test[i].num = 0x00F00000UL; ul_test[i].expected = "15728640"; i++; ul_test[i].num = 0x000F0000UL; ul_test[i].expected = "983040"; i++; ul_test[i].num = 0x0000F000UL; ul_test[i].expected = "61440"; i++; ul_test[i].num = 0x00000F00UL; ul_test[i].expected = "3840"; i++; ul_test[i].num = 0x000000F0UL; ul_test[i].expected = "240"; i++; ul_test[i].num = 0x0000000FUL; ul_test[i].expected = "15"; i++; ul_test[i].num = 0xC0000000UL; ul_test[i].expected = "3221225472"; i++; ul_test[i].num = 0x0C000000UL; ul_test[i].expected = "201326592"; i++; ul_test[i].num = 0x00C00000UL; ul_test[i].expected = "12582912"; i++; ul_test[i].num = 0x000C0000UL; ul_test[i].expected = "786432"; i++; ul_test[i].num = 0x0000C000UL; ul_test[i].expected = "49152"; i++; ul_test[i].num = 0x00000C00UL; ul_test[i].expected = "3072"; i++; ul_test[i].num = 0x000000C0UL; ul_test[i].expected = "192"; i++; ul_test[i].num = 0x0000000CUL; ul_test[i].expected = "12"; i++; ul_test[i].num = 0x00000001UL; ul_test[i].expected = "1"; i++; ul_test[i].num = 0x00000000UL; ul_test[i].expected = "0"; num_ulong_tests = i; #elif (CURL_SIZEOF_LONG == 8) i=1; ul_test[i].num = 0xFFFFFFFFFFFFFFFFUL; ul_test[i].expected = "18446744073709551615"; i++; ul_test[i].num = 0xFFFFFFFF00000000UL; ul_test[i].expected = "18446744069414584320"; i++; ul_test[i].num = 0x00000000FFFFFFFFUL; ul_test[i].expected = "4294967295"; i++; ul_test[i].num = 0xFFFF000000000000UL; ul_test[i].expected = "18446462598732840960"; i++; ul_test[i].num = 0x0000FFFF00000000UL; ul_test[i].expected = "281470681743360"; i++; ul_test[i].num = 0x00000000FFFF0000UL; ul_test[i].expected = "4294901760"; i++; ul_test[i].num = 0x000000000000FFFFUL; ul_test[i].expected = "65535"; i++; ul_test[i].num = 0xFF00000000000000UL; ul_test[i].expected = "18374686479671623680"; i++; ul_test[i].num = 0x00FF000000000000UL; ul_test[i].expected = "71776119061217280"; i++; ul_test[i].num = 0x0000FF0000000000UL; ul_test[i].expected = "280375465082880"; i++; ul_test[i].num = 0x000000FF00000000UL; ul_test[i].expected = "1095216660480"; i++; ul_test[i].num = 0x00000000FF000000UL; ul_test[i].expected = "4278190080"; i++; ul_test[i].num = 0x0000000000FF0000UL; ul_test[i].expected = "16711680"; i++; ul_test[i].num = 0x000000000000FF00UL; ul_test[i].expected = "65280"; i++; ul_test[i].num = 0x00000000000000FFUL; ul_test[i].expected = "255"; i++; ul_test[i].num = 0xF000000000000000UL; ul_test[i].expected = "17293822569102704640"; i++; ul_test[i].num = 0x0F00000000000000UL; ul_test[i].expected = "1080863910568919040"; i++; ul_test[i].num = 0x00F0000000000000UL; ul_test[i].expected = "67553994410557440"; i++; ul_test[i].num = 0x000F000000000000UL; ul_test[i].expected = "4222124650659840"; i++; ul_test[i].num = 0x0000F00000000000UL; ul_test[i].expected = "263882790666240"; i++; ul_test[i].num = 0x00000F0000000000UL; ul_test[i].expected = "16492674416640"; i++; ul_test[i].num = 0x000000F000000000UL; ul_test[i].expected = "1030792151040"; i++; ul_test[i].num = 0x0000000F00000000UL; ul_test[i].expected = "64424509440"; i++; ul_test[i].num = 0x00000000F0000000UL; ul_test[i].expected = "4026531840"; i++; ul_test[i].num = 0x000000000F000000UL; ul_test[i].expected = "251658240"; i++; ul_test[i].num = 0x0000000000F00000UL; 
ul_test[i].expected = "15728640"; i++; ul_test[i].num = 0x00000000000F0000UL; ul_test[i].expected = "983040"; i++; ul_test[i].num = 0x000000000000F000UL; ul_test[i].expected = "61440"; i++; ul_test[i].num = 0x0000000000000F00UL; ul_test[i].expected = "3840"; i++; ul_test[i].num = 0x00000000000000F0UL; ul_test[i].expected = "240"; i++; ul_test[i].num = 0x000000000000000FUL; ul_test[i].expected = "15"; i++; ul_test[i].num = 0xC000000000000000UL; ul_test[i].expected = "13835058055282163712"; i++; ul_test[i].num = 0x0C00000000000000UL; ul_test[i].expected = "864691128455135232"; i++; ul_test[i].num = 0x00C0000000000000UL; ul_test[i].expected = "54043195528445952"; i++; ul_test[i].num = 0x000C000000000000UL; ul_test[i].expected = "3377699720527872"; i++; ul_test[i].num = 0x0000C00000000000UL; ul_test[i].expected = "211106232532992"; i++; ul_test[i].num = 0x00000C0000000000UL; ul_test[i].expected = "13194139533312"; i++; ul_test[i].num = 0x000000C000000000UL; ul_test[i].expected = "824633720832"; i++; ul_test[i].num = 0x0000000C00000000UL; ul_test[i].expected = "51539607552"; i++; ul_test[i].num = 0x00000000C0000000UL; ul_test[i].expected = "3221225472"; i++; ul_test[i].num = 0x000000000C000000UL; ul_test[i].expected = "201326592"; i++; ul_test[i].num = 0x0000000000C00000UL; ul_test[i].expected = "12582912"; i++; ul_test[i].num = 0x00000000000C0000UL; ul_test[i].expected = "786432"; i++; ul_test[i].num = 0x000000000000C000UL; ul_test[i].expected = "49152"; i++; ul_test[i].num = 0x0000000000000C00UL; ul_test[i].expected = "3072"; i++; ul_test[i].num = 0x00000000000000C0UL; ul_test[i].expected = "192"; i++; ul_test[i].num = 0x000000000000000CUL; ul_test[i].expected = "12"; i++; ul_test[i].num = 0x00000001UL; ul_test[i].expected = "1"; i++; ul_test[i].num = 0x00000000UL; ul_test[i].expected = "0"; num_ulong_tests = i; #endif for(i=1; i<=num_ulong_tests; i++) { for(j=0; j<BUFSZ; j++) ul_test[i].result[j] = 'X'; ul_test[i].result[BUFSZ-1] = '\0'; (void)curl_msprintf(ul_test[i].result, "%lu", ul_test[i].num); if(memcmp(ul_test[i].result, ul_test[i].expected, strlen(ul_test[i].expected))) { printf("unsigned long test #%.2d: Failed (Expected: %s Got: %s)\n", i, ul_test[i].expected, ul_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() unsigned long tests OK!\n"); else printf("Some curl_mprintf() unsigned long tests Failed!\n"); return failed; } static int test_signed_long_formatting(void) { int i, j; int num_slong_tests; int failed = 0; #if (CURL_SIZEOF_LONG == 2) i=1; sl_test[i].num = 0x7FFFL; sl_test[i].expected = "32767"; i++; sl_test[i].num = 0x7FFEL; sl_test[i].expected = "32766"; i++; sl_test[i].num = 0x7FFDL; sl_test[i].expected = "32765"; i++; sl_test[i].num = 0x7F00L; sl_test[i].expected = "32512"; i++; sl_test[i].num = 0x07F0L; sl_test[i].expected = "2032"; i++; sl_test[i].num = 0x007FL; sl_test[i].expected = "127"; i++; sl_test[i].num = 0x7000L; sl_test[i].expected = "28672"; i++; sl_test[i].num = 0x0700L; sl_test[i].expected = "1792"; i++; sl_test[i].num = 0x0070L; sl_test[i].expected = "112"; i++; sl_test[i].num = 0x0007L; sl_test[i].expected = "7"; i++; sl_test[i].num = 0x5000L; sl_test[i].expected = "20480"; i++; sl_test[i].num = 0x0500L; sl_test[i].expected = "1280"; i++; sl_test[i].num = 0x0050L; sl_test[i].expected = "80"; i++; sl_test[i].num = 0x0005L; sl_test[i].expected = "5"; i++; sl_test[i].num = 0x0001L; sl_test[i].expected = "1"; i++; sl_test[i].num = 0x0000L; sl_test[i].expected = "0"; i++; sl_test[i].num = -0x7FFFL -1L; sl_test[i].expected = "-32768"; i++; 
sl_test[i].num = -0x7FFEL -1L; sl_test[i].expected = "-32767"; i++; sl_test[i].num = -0x7FFDL -1L; sl_test[i].expected = "-32766"; i++; sl_test[i].num = -0x7F00L -1L; sl_test[i].expected = "-32513"; i++; sl_test[i].num = -0x07F0L -1L; sl_test[i].expected = "-2033"; i++; sl_test[i].num = -0x007FL -1L; sl_test[i].expected = "-128"; i++; sl_test[i].num = -0x7000L -1L; sl_test[i].expected = "-28673"; i++; sl_test[i].num = -0x0700L -1L; sl_test[i].expected = "-1793"; i++; sl_test[i].num = -0x0070L -1L; sl_test[i].expected = "-113"; i++; sl_test[i].num = -0x0007L -1L; sl_test[i].expected = "-8"; i++; sl_test[i].num = -0x5000L -1L; sl_test[i].expected = "-20481"; i++; sl_test[i].num = -0x0500L -1L; sl_test[i].expected = "-1281"; i++; sl_test[i].num = -0x0050L -1L; sl_test[i].expected = "-81"; i++; sl_test[i].num = -0x0005L -1L; sl_test[i].expected = "-6"; i++; sl_test[i].num = 0x0000L -1L; sl_test[i].expected = "-1"; num_slong_tests = i; #elif (CURL_SIZEOF_LONG == 4) i=1; sl_test[i].num = 0x7FFFFFFFL; sl_test[i].expected = "2147483647"; i++; sl_test[i].num = 0x7FFFFFFEL; sl_test[i].expected = "2147483646"; i++; sl_test[i].num = 0x7FFFFFFDL; sl_test[i].expected = "2147483645"; i++; sl_test[i].num = 0x7FFF0000L; sl_test[i].expected = "2147418112"; i++; sl_test[i].num = 0x00007FFFL; sl_test[i].expected = "32767"; i++; sl_test[i].num = 0x7F000000L; sl_test[i].expected = "2130706432"; i++; sl_test[i].num = 0x007F0000L; sl_test[i].expected = "8323072"; i++; sl_test[i].num = 0x00007F00L; sl_test[i].expected = "32512"; i++; sl_test[i].num = 0x0000007FL; sl_test[i].expected = "127"; i++; sl_test[i].num = 0x70000000L; sl_test[i].expected = "1879048192"; i++; sl_test[i].num = 0x07000000L; sl_test[i].expected = "117440512"; i++; sl_test[i].num = 0x00700000L; sl_test[i].expected = "7340032"; i++; sl_test[i].num = 0x00070000L; sl_test[i].expected = "458752"; i++; sl_test[i].num = 0x00007000L; sl_test[i].expected = "28672"; i++; sl_test[i].num = 0x00000700L; sl_test[i].expected = "1792"; i++; sl_test[i].num = 0x00000070L; sl_test[i].expected = "112"; i++; sl_test[i].num = 0x00000007L; sl_test[i].expected = "7"; i++; sl_test[i].num = 0x50000000L; sl_test[i].expected = "1342177280"; i++; sl_test[i].num = 0x05000000L; sl_test[i].expected = "83886080"; i++; sl_test[i].num = 0x00500000L; sl_test[i].expected = "5242880"; i++; sl_test[i].num = 0x00050000L; sl_test[i].expected = "327680"; i++; sl_test[i].num = 0x00005000L; sl_test[i].expected = "20480"; i++; sl_test[i].num = 0x00000500L; sl_test[i].expected = "1280"; i++; sl_test[i].num = 0x00000050L; sl_test[i].expected = "80"; i++; sl_test[i].num = 0x00000005L; sl_test[i].expected = "5"; i++; sl_test[i].num = 0x00000001L; sl_test[i].expected = "1"; i++; sl_test[i].num = 0x00000000L; sl_test[i].expected = "0"; i++; sl_test[i].num = -0x7FFFFFFFL -1L; sl_test[i].expected = "-2147483648"; i++; sl_test[i].num = -0x7FFFFFFEL -1L; sl_test[i].expected = "-2147483647"; i++; sl_test[i].num = -0x7FFFFFFDL -1L; sl_test[i].expected = "-2147483646"; i++; sl_test[i].num = -0x7FFF0000L -1L; sl_test[i].expected = "-2147418113"; i++; sl_test[i].num = -0x00007FFFL -1L; sl_test[i].expected = "-32768"; i++; sl_test[i].num = -0x7F000000L -1L; sl_test[i].expected = "-2130706433"; i++; sl_test[i].num = -0x007F0000L -1L; sl_test[i].expected = "-8323073"; i++; sl_test[i].num = -0x00007F00L -1L; sl_test[i].expected = "-32513"; i++; sl_test[i].num = -0x0000007FL -1L; sl_test[i].expected = "-128"; i++; sl_test[i].num = -0x70000000L -1L; sl_test[i].expected = "-1879048193"; i++; sl_test[i].num = 
-0x07000000L -1L; sl_test[i].expected = "-117440513"; i++; sl_test[i].num = -0x00700000L -1L; sl_test[i].expected = "-7340033"; i++; sl_test[i].num = -0x00070000L -1L; sl_test[i].expected = "-458753"; i++; sl_test[i].num = -0x00007000L -1L; sl_test[i].expected = "-28673"; i++; sl_test[i].num = -0x00000700L -1L; sl_test[i].expected = "-1793"; i++; sl_test[i].num = -0x00000070L -1L; sl_test[i].expected = "-113"; i++; sl_test[i].num = -0x00000007L -1L; sl_test[i].expected = "-8"; i++; sl_test[i].num = -0x50000000L -1L; sl_test[i].expected = "-1342177281"; i++; sl_test[i].num = -0x05000000L -1L; sl_test[i].expected = "-83886081"; i++; sl_test[i].num = -0x00500000L -1L; sl_test[i].expected = "-5242881"; i++; sl_test[i].num = -0x00050000L -1L; sl_test[i].expected = "-327681"; i++; sl_test[i].num = -0x00005000L -1L; sl_test[i].expected = "-20481"; i++; sl_test[i].num = -0x00000500L -1L; sl_test[i].expected = "-1281"; i++; sl_test[i].num = -0x00000050L -1L; sl_test[i].expected = "-81"; i++; sl_test[i].num = -0x00000005L -1L; sl_test[i].expected = "-6"; i++; sl_test[i].num = 0x00000000L -1L; sl_test[i].expected = "-1"; num_slong_tests = i; #elif (CURL_SIZEOF_LONG == 8) i=1; sl_test[i].num = 0x7FFFFFFFFFFFFFFFL; sl_test[i].expected = "9223372036854775807"; i++; sl_test[i].num = 0x7FFFFFFFFFFFFFFEL; sl_test[i].expected = "9223372036854775806"; i++; sl_test[i].num = 0x7FFFFFFFFFFFFFFDL; sl_test[i].expected = "9223372036854775805"; i++; sl_test[i].num = 0x7FFFFFFF00000000L; sl_test[i].expected = "9223372032559808512"; i++; sl_test[i].num = 0x000000007FFFFFFFL; sl_test[i].expected = "2147483647"; i++; sl_test[i].num = 0x7FFF000000000000L; sl_test[i].expected = "9223090561878065152"; i++; sl_test[i].num = 0x00007FFF00000000L; sl_test[i].expected = "140733193388032"; i++; sl_test[i].num = 0x000000007FFF0000L; sl_test[i].expected = "2147418112"; i++; sl_test[i].num = 0x0000000000007FFFL; sl_test[i].expected = "32767"; i++; sl_test[i].num = 0x7F00000000000000L; sl_test[i].expected = "9151314442816847872"; i++; sl_test[i].num = 0x007F000000000000L; sl_test[i].expected = "35747322042253312"; i++; sl_test[i].num = 0x00007F0000000000L; sl_test[i].expected = "139637976727552"; i++; sl_test[i].num = 0x0000007F00000000L; sl_test[i].expected = "545460846592"; i++; sl_test[i].num = 0x000000007F000000L; sl_test[i].expected = "2130706432"; i++; sl_test[i].num = 0x00000000007F0000L; sl_test[i].expected = "8323072"; i++; sl_test[i].num = 0x0000000000007F00L; sl_test[i].expected = "32512"; i++; sl_test[i].num = 0x000000000000007FL; sl_test[i].expected = "127"; i++; sl_test[i].num = 0x7000000000000000L; sl_test[i].expected = "8070450532247928832"; i++; sl_test[i].num = 0x0700000000000000L; sl_test[i].expected = "504403158265495552"; i++; sl_test[i].num = 0x0070000000000000L; sl_test[i].expected = "31525197391593472"; i++; sl_test[i].num = 0x0007000000000000L; sl_test[i].expected = "1970324836974592"; i++; sl_test[i].num = 0x0000700000000000L; sl_test[i].expected = "123145302310912"; i++; sl_test[i].num = 0x0000070000000000L; sl_test[i].expected = "7696581394432"; i++; sl_test[i].num = 0x0000007000000000L; sl_test[i].expected = "481036337152"; i++; sl_test[i].num = 0x0000000700000000L; sl_test[i].expected = "30064771072"; i++; sl_test[i].num = 0x0000000070000000L; sl_test[i].expected = "1879048192"; i++; sl_test[i].num = 0x0000000007000000L; sl_test[i].expected = "117440512"; i++; sl_test[i].num = 0x0000000000700000L; sl_test[i].expected = "7340032"; i++; sl_test[i].num = 0x0000000000070000L; sl_test[i].expected = 
"458752"; i++; sl_test[i].num = 0x0000000000007000L; sl_test[i].expected = "28672"; i++; sl_test[i].num = 0x0000000000000700L; sl_test[i].expected = "1792"; i++; sl_test[i].num = 0x0000000000000070L; sl_test[i].expected = "112"; i++; sl_test[i].num = 0x0000000000000007L; sl_test[i].expected = "7"; i++; sl_test[i].num = 0x0000000000000001L; sl_test[i].expected = "1"; i++; sl_test[i].num = 0x0000000000000000L; sl_test[i].expected = "0"; i++; sl_test[i].num = -0x7FFFFFFFFFFFFFFFL -1L; sl_test[i].expected = "-9223372036854775808"; i++; sl_test[i].num = -0x7FFFFFFFFFFFFFFEL -1L; sl_test[i].expected = "-9223372036854775807"; i++; sl_test[i].num = -0x7FFFFFFFFFFFFFFDL -1L; sl_test[i].expected = "-9223372036854775806"; i++; sl_test[i].num = -0x7FFFFFFF00000000L -1L; sl_test[i].expected = "-9223372032559808513"; i++; sl_test[i].num = -0x000000007FFFFFFFL -1L; sl_test[i].expected = "-2147483648"; i++; sl_test[i].num = -0x7FFF000000000000L -1L; sl_test[i].expected = "-9223090561878065153"; i++; sl_test[i].num = -0x00007FFF00000000L -1L; sl_test[i].expected = "-140733193388033"; i++; sl_test[i].num = -0x000000007FFF0000L -1L; sl_test[i].expected = "-2147418113"; i++; sl_test[i].num = -0x0000000000007FFFL -1L; sl_test[i].expected = "-32768"; i++; sl_test[i].num = -0x7F00000000000000L -1L; sl_test[i].expected = "-9151314442816847873"; i++; sl_test[i].num = -0x007F000000000000L -1L; sl_test[i].expected = "-35747322042253313"; i++; sl_test[i].num = -0x00007F0000000000L -1L; sl_test[i].expected = "-139637976727553"; i++; sl_test[i].num = -0x0000007F00000000L -1L; sl_test[i].expected = "-545460846593"; i++; sl_test[i].num = -0x000000007F000000L -1L; sl_test[i].expected = "-2130706433"; i++; sl_test[i].num = -0x00000000007F0000L -1L; sl_test[i].expected = "-8323073"; i++; sl_test[i].num = -0x0000000000007F00L -1L; sl_test[i].expected = "-32513"; i++; sl_test[i].num = -0x000000000000007FL -1L; sl_test[i].expected = "-128"; i++; sl_test[i].num = -0x7000000000000000L -1L; sl_test[i].expected = "-8070450532247928833"; i++; sl_test[i].num = -0x0700000000000000L -1L; sl_test[i].expected = "-504403158265495553"; i++; sl_test[i].num = -0x0070000000000000L -1L; sl_test[i].expected = "-31525197391593473"; i++; sl_test[i].num = -0x0007000000000000L -1L; sl_test[i].expected = "-1970324836974593"; i++; sl_test[i].num = -0x0000700000000000L -1L; sl_test[i].expected = "-123145302310913"; i++; sl_test[i].num = -0x0000070000000000L -1L; sl_test[i].expected = "-7696581394433"; i++; sl_test[i].num = -0x0000007000000000L -1L; sl_test[i].expected = "-481036337153"; i++; sl_test[i].num = -0x0000000700000000L -1L; sl_test[i].expected = "-30064771073"; i++; sl_test[i].num = -0x0000000070000000L -1L; sl_test[i].expected = "-1879048193"; i++; sl_test[i].num = -0x0000000007000000L -1L; sl_test[i].expected = "-117440513"; i++; sl_test[i].num = -0x0000000000700000L -1L; sl_test[i].expected = "-7340033"; i++; sl_test[i].num = -0x0000000000070000L -1L; sl_test[i].expected = "-458753"; i++; sl_test[i].num = -0x0000000000007000L -1L; sl_test[i].expected = "-28673"; i++; sl_test[i].num = -0x0000000000000700L -1L; sl_test[i].expected = "-1793"; i++; sl_test[i].num = -0x0000000000000070L -1L; sl_test[i].expected = "-113"; i++; sl_test[i].num = -0x0000000000000007L -1L; sl_test[i].expected = "-8"; i++; sl_test[i].num = 0x0000000000000000L -1L; sl_test[i].expected = "-1"; num_slong_tests = i; #endif for(i=1; i<=num_slong_tests; i++) { for(j=0; j<BUFSZ; j++) sl_test[i].result[j] = 'X'; sl_test[i].result[BUFSZ-1] = '\0'; 
(void)curl_msprintf(sl_test[i].result, "%ld", sl_test[i].num); if(memcmp(sl_test[i].result, sl_test[i].expected, strlen(sl_test[i].expected))) { printf("signed long test #%.2d: Failed (Expected: %s Got: %s)\n", i, sl_test[i].expected, sl_test[i].result); failed++; } } if(!failed) printf("All curl_mprintf() signed long tests OK!\n"); else printf("Some curl_mprintf() signed long tests Failed!\n"); return failed; } static int test_curl_off_t_formatting(void) { int i, j; int num_cofft_tests; int failed = 0; #if (CURL_SIZEOF_CURL_OFF_T == 2) i=1; co_test[i].num = MPRNT_OFF_T_C(0x7FFF); co_test[i].expected = "32767"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFE); co_test[i].expected = "32766"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFD); co_test[i].expected = "32765"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7F00); co_test[i].expected = "32512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x07F0); co_test[i].expected = "2032"; i++; co_test[i].num = MPRNT_OFF_T_C(0x007F); co_test[i].expected = "127"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7000); co_test[i].expected = "28672"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0700); co_test[i].expected = "1792"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0070); co_test[i].expected = "112"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0007); co_test[i].expected = "7"; i++; co_test[i].num = MPRNT_OFF_T_C(0x5000); co_test[i].expected = "20480"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0500); co_test[i].expected = "1280"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0050); co_test[i].expected = "80"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0005); co_test[i].expected = "5"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0001); co_test[i].expected = "1"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000); co_test[i].expected = "0"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32768"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFE) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32767"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFD) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32766"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7F00) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32513"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x07F0) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2033"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x007F) -MPRNT_OFF_T_C(1); co_test[i].expected = "-128"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-28673"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0700) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1793"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0070) -MPRNT_OFF_T_C(1); co_test[i].expected = "-113"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0007) -MPRNT_OFF_T_C(1); co_test[i].expected = "-8"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x5000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-20481"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0500) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1281"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0050) -MPRNT_OFF_T_C(1); co_test[i].expected = "-81"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0005) -MPRNT_OFF_T_C(1); co_test[i].expected = "-6"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1"; num_cofft_tests = i; #elif (CURL_SIZEOF_CURL_OFF_T == 4) i=1; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFF); co_test[i].expected = "2147483647"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFE); co_test[i].expected = "2147483646"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFD); co_test[i].expected = "2147483645"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFF0000); co_test[i].expected = "2147418112"; i++; 
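  /* MPRNT_OFF_T_C() is assumed to be defined earlier in this file: it
     decorates a constant with whatever suffix yields a curl_off_t on the
     current platform, since the right suffix (L, LL, i64, ...) varies */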
co_test[i].num = MPRNT_OFF_T_C(0x00007FFF); co_test[i].expected = "32767"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7F000000); co_test[i].expected = "2130706432"; i++; co_test[i].num = MPRNT_OFF_T_C(0x007F0000); co_test[i].expected = "8323072"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00007F00); co_test[i].expected = "32512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000007F); co_test[i].expected = "127"; i++; co_test[i].num = MPRNT_OFF_T_C(0x70000000); co_test[i].expected = "1879048192"; i++; co_test[i].num = MPRNT_OFF_T_C(0x07000000); co_test[i].expected = "117440512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00700000); co_test[i].expected = "7340032"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00070000); co_test[i].expected = "458752"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00007000); co_test[i].expected = "28672"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000700); co_test[i].expected = "1792"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000070); co_test[i].expected = "112"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000007); co_test[i].expected = "7"; i++; co_test[i].num = MPRNT_OFF_T_C(0x50000000); co_test[i].expected = "1342177280"; i++; co_test[i].num = MPRNT_OFF_T_C(0x05000000); co_test[i].expected = "83886080"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00500000); co_test[i].expected = "5242880"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00050000); co_test[i].expected = "327680"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00005000); co_test[i].expected = "20480"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000500); co_test[i].expected = "1280"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000050); co_test[i].expected = "80"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000005); co_test[i].expected = "5"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000001); co_test[i].expected = "1"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000000); co_test[i].expected = "0"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147483648"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFE) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147483647"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFD) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147483646"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFF0000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147418113"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00007FFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32768"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7F000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2130706433"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x007F0000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-8323073"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00007F00) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32513"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000007F) -MPRNT_OFF_T_C(1); co_test[i].expected = "-128"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x70000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1879048193"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x07000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-117440513"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00700000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-7340033"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00070000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-458753"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00007000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-28673"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000700) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1793"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000070) -MPRNT_OFF_T_C(1); co_test[i].expected = "-113"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000007) -MPRNT_OFF_T_C(1); 
co_test[i].expected = "-8"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x50000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1342177281"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x05000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-83886081"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00500000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-5242881"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00050000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-327681"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00005000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-20481"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000500) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1281"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000050) -MPRNT_OFF_T_C(1); co_test[i].expected = "-81"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000005) -MPRNT_OFF_T_C(1); co_test[i].expected = "-6"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1"; num_cofft_tests = i; #elif (CURL_SIZEOF_CURL_OFF_T == 8) i=1; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFF); co_test[i].expected = "9223372036854775807"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFE); co_test[i].expected = "9223372036854775806"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFD); co_test[i].expected = "9223372036854775805"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFFFFFF00000000); co_test[i].expected = "9223372032559808512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x000000007FFFFFFF); co_test[i].expected = "2147483647"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7FFF000000000000); co_test[i].expected = "9223090561878065152"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00007FFF00000000); co_test[i].expected = "140733193388032"; i++; co_test[i].num = MPRNT_OFF_T_C(0x000000007FFF0000); co_test[i].expected = "2147418112"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000007FFF); co_test[i].expected = "32767"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7F00000000000000); co_test[i].expected = "9151314442816847872"; i++; co_test[i].num = MPRNT_OFF_T_C(0x007F000000000000); co_test[i].expected = "35747322042253312"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00007F0000000000); co_test[i].expected = "139637976727552"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000007F00000000); co_test[i].expected = "545460846592"; i++; co_test[i].num = MPRNT_OFF_T_C(0x000000007F000000); co_test[i].expected = "2130706432"; i++; co_test[i].num = MPRNT_OFF_T_C(0x00000000007F0000); co_test[i].expected = "8323072"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000007F00); co_test[i].expected = "32512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x000000000000007F); co_test[i].expected = "127"; i++; co_test[i].num = MPRNT_OFF_T_C(0x7000000000000000); co_test[i].expected = "8070450532247928832"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0700000000000000); co_test[i].expected = "504403158265495552"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0070000000000000); co_test[i].expected = "31525197391593472"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0007000000000000); co_test[i].expected = "1970324836974592"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000700000000000); co_test[i].expected = "123145302310912"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000070000000000); co_test[i].expected = "7696581394432"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000007000000000); co_test[i].expected = "481036337152"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000700000000); co_test[i].expected = "30064771072"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000070000000); co_test[i].expected = "1879048192"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000007000000); 
co_test[i].expected = "117440512"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000700000); co_test[i].expected = "7340032"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000070000); co_test[i].expected = "458752"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000007000); co_test[i].expected = "28672"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000000700); co_test[i].expected = "1792"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000000070); co_test[i].expected = "112"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000000007); co_test[i].expected = "7"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000000001); co_test[i].expected = "1"; i++; co_test[i].num = MPRNT_OFF_T_C(0x0000000000000000); co_test[i].expected = "0"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9223372036854775808"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFE) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9223372036854775807"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFFFFFFFFFD) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9223372036854775806"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFFFFFF00000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9223372032559808513"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x000000007FFFFFFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147483648"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7FFF000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9223090561878065153"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00007FFF00000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-140733193388033"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x000000007FFF0000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2147418113"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000000000007FFF) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32768"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7F00000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-9151314442816847873"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x007F000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-35747322042253313"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00007F0000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-139637976727553"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000007F00000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-545460846593"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x000000007F000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-2130706433"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x00000000007F0000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-8323073"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000000000007F00) -MPRNT_OFF_T_C(1); co_test[i].expected = "-32513"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x000000000000007F) -MPRNT_OFF_T_C(1); co_test[i].expected = "-128"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x7000000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-8070450532247928833"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0700000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-504403158265495553"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0070000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-31525197391593473"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0007000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1970324836974593"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000700000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-123145302310913"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000070000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-7696581394433"; i++; co_test[i].num = -MPRNT_OFF_T_C(0x0000007000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-481036337153"; i++; co_test[i].num = 
-MPRNT_OFF_T_C(0x0000000700000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-30064771073"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000070000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1879048193"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000007000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-117440513"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000700000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-7340033"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000070000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-458753"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000007000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-28673"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000000700) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1793"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000000070) -MPRNT_OFF_T_C(1); co_test[i].expected = "-113"; i++;
  co_test[i].num = -MPRNT_OFF_T_C(0x0000000000000007) -MPRNT_OFF_T_C(1); co_test[i].expected = "-8"; i++;
  co_test[i].num = MPRNT_OFF_T_C(0x0000000000000000) -MPRNT_OFF_T_C(1); co_test[i].expected = "-1";
  num_cofft_tests = i;
#endif
/* !checksrc! enable LONGLINE */

  for(i=1; i<=num_cofft_tests; i++) {

    for(j=0; j<BUFSZ; j++)
      co_test[i].result[j] = 'X';
    co_test[i].result[BUFSZ-1] = '\0';

    (void)curl_msprintf(co_test[i].result, "%" CURL_FORMAT_CURL_OFF_T,
                        co_test[i].num);

    if(memcmp(co_test[i].result,
              co_test[i].expected,
              strlen(co_test[i].expected))) {
      printf("curl_off_t test #%.2d: Failed (Expected: %s Got: %s)\n",
             i, co_test[i].expected, co_test[i].result);
      failed++;
    }
  }

  if(!failed)
    printf("All curl_mprintf() curl_off_t tests OK!\n");
  else
    printf("Some curl_mprintf() curl_off_t tests Failed!\n");

  return failed;
}

static int _string_check(int linenumber, char *buf, const char *buf2)
{
  if(strcmp(buf, buf2)) {
    /* they shouldn't differ */
    printf("sprintf line %d failed:\nwe '%s'\nsystem: '%s'\n",
           linenumber, buf, buf2);
    return 1;
  }
  return 0;
}

#define string_check(x,y) _string_check(__LINE__, x, y)

static int _strlen_check(int linenumber, char *buf, size_t len)
{
  size_t buflen = strlen(buf);
  if(len != buflen) {
    /* they shouldn't differ; note that size_t values must not be passed to
       printf's "%d", so they are printed with C99's "%zu" here */
    printf("sprintf strlen:%d failed:\nwe '%zu'\nsystem: '%zu'\n",
           linenumber, buflen, len);
    return 1;
  }
  return 0;
}

#define strlen_check(x,y) _strlen_check(__LINE__, x, y)

/*
 * The output strings in this test need to have been verified with a system
 * sprintf() before used here.
 */
static int test_string_formatting(void)
{
  int errors = 0;
  char buf[256];

  curl_msnprintf(buf, sizeof(buf), "%0*d%s", 2, 9, "foo");
  errors += string_check(buf, "09foo");

  curl_msnprintf(buf, sizeof(buf), "%*.*s", 5, 2, "foo");
  errors += string_check(buf, "   fo");

  curl_msnprintf(buf, sizeof(buf), "%*.*s", 2, 5, "foo");
  errors += string_check(buf, "foo");

  curl_msnprintf(buf, sizeof(buf), "%*.*s", 0, 10, "foo");
  errors += string_check(buf, "foo");

  curl_msnprintf(buf, sizeof(buf), "%-10s", "foo");
  errors += string_check(buf, "foo       ");

  curl_msnprintf(buf, sizeof(buf), "%10s", "foo");
  errors += string_check(buf, "       foo");

  curl_msnprintf(buf, sizeof(buf), "%*.*s", -10, -10, "foo");
  errors += string_check(buf, "foo       ");

  if(!errors)
    printf("All curl_mprintf() strings tests OK!\n");
  else
    printf("Some curl_mprintf() string tests Failed!\n");

  return errors;
}

static int test_weird_arguments(void)
{
  int errors = 0;
  char buf[256];
  int rc;

  /* MAX_PARAMETERS is 128, try exactly 128! */
static int test_weird_arguments(void)
{
  int errors = 0;
  char buf[256];
  int rc;

  /* MAX_PARAMETERS is 128, try exact 128! */
  rc = curl_msnprintf(buf, sizeof(buf),
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 1 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 2 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 3 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 4 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 5 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 6 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 7 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 8 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 9 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 10 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 11 */
                      "%d%d%d%d%d%d%d%d"        /* 8 */
                      ,
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 1 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 2 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 3 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 4 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 5 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 6 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 7 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 8 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 9 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 10 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 11 */
                      0, 1, 2, 3, 4, 5, 6, 7);         /* 8 */

  if(rc != 128) {
    printf("curl_mprintf() returned %d and not 128!\n", rc);
    errors++;
  }

  errors += string_check(buf,
                         "0123456789"   /* 10 */
                         "0123456789"   /* 10 1 */
                         "0123456789"   /* 10 2 */
                         "0123456789"   /* 10 3 */
                         "0123456789"   /* 10 4 */
                         "0123456789"   /* 10 5 */
                         "0123456789"   /* 10 6 */
                         "0123456789"   /* 10 7 */
                         "0123456789"   /* 10 8 */
                         "0123456789"   /* 10 9 */
                         "0123456789"   /* 10 10 */
                         "0123456789"   /* 10 11 */
                         "01234567");   /* 8 */

  /* MAX_PARAMETERS is 128, try more! */
  buf[0] = 0;
  rc = curl_msnprintf(buf, sizeof(buf),
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 1 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 2 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 3 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 4 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 5 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 6 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 7 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 8 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 9 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 10 */
                      "%d%d%d%d%d%d%d%d%d%d"    /* 10 11 */
                      "%d%d%d%d%d%d%d%d%d"      /* 9 */
                      ,
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 1 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 2 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 3 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 4 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 5 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 6 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 7 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 8 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 9 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 10 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8, 9,    /* 10 11 */
                      0, 1, 2, 3, 4, 5, 6, 7, 8);      /* 9 */

  if(rc != -1) {
    printf("curl_mprintf() returned %d and not -1!\n", rc);
    errors++;
  }

  errors += string_check(buf, "");

  if(errors)
    printf("Some curl_mprintf() weird arguments tests failed!\n");

  return errors;
}

/* DBL_MAX value from Linux */
#define MAXIMIZE -179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.000000
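/*
 * Editorial note: MAXIMIZE is the "DBL_MAX value from Linux" spelled out
 * in full decimal, as the comment above says. Printed with plain "%f" it
 * yields the 317-character string that the strlen_check(buf, 317) case
 * below relies on.
 */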
string_check(buf, "9.100000"); curl_msnprintf(buf, sizeof(buf), "%10f", 9.1); errors += string_check(buf, " 9.100000"); curl_msnprintf(buf, sizeof(buf), "%10.3f", 9.1); errors += string_check(buf, " 9.100"); curl_msnprintf(buf, sizeof(buf), "%-10.3f", 9.1); errors += string_check(buf, "9.100 "); curl_msnprintf(buf, sizeof(buf), "%-10.3f", 9.123456); errors += string_check(buf, "9.123 "); curl_msnprintf(buf, sizeof(buf), "%.-2f", 9.1); errors += string_check(buf, "9.100000"); curl_msnprintf(buf, sizeof(buf), "%*f", 10, 9.1); errors += string_check(buf, " 9.100000"); curl_msnprintf(buf, sizeof(buf), "%*f", 3, 9.1); errors += string_check(buf, "9.100000"); curl_msnprintf(buf, sizeof(buf), "%*f", 6, 9.2987654); errors += string_check(buf, "9.298765"); curl_msnprintf(buf, sizeof(buf), "%*f", 6, 9.298765); errors += string_check(buf, "9.298765"); curl_msnprintf(buf, sizeof(buf), "%*f", 6, 9.29876); errors += string_check(buf, "9.298760"); curl_msnprintf(buf, sizeof(buf), "%.*f", 6, 9.2987654); errors += string_check(buf, "9.298765"); curl_msnprintf(buf, sizeof(buf), "%.*f", 5, 9.2987654); errors += string_check(buf, "9.29877"); curl_msnprintf(buf, sizeof(buf), "%.*f", 4, 9.2987654); errors += string_check(buf, "9.2988"); curl_msnprintf(buf, sizeof(buf), "%.*f", 3, 9.2987654); errors += string_check(buf, "9.299"); curl_msnprintf(buf, sizeof(buf), "%.*f", 2, 9.2987654); errors += string_check(buf, "9.30"); curl_msnprintf(buf, sizeof(buf), "%.*f", 1, 9.2987654); errors += string_check(buf, "9.3"); curl_msnprintf(buf, sizeof(buf), "%.*f", 0, 9.2987654); errors += string_check(buf, "9"); /* very large precisions easily turn into system specific outputs so we only check the output buffer length here as we know the internal limit */ curl_msnprintf(buf, sizeof(buf), "%.*f", (1<<30), 9.2987654); errors += strlen_check(buf, 325); curl_msnprintf(buf, sizeof(buf), "%10000.10000f", 9.2987654); errors += strlen_check(buf, 325); curl_msnprintf(buf, sizeof(buf), "%240.10000f", 123456789123456789123456789.2987654); errors += strlen_check(buf, 325); /* 1<<31 turns negative (-2147483648) when used signed */ curl_msnprintf(buf, sizeof(buf), "%*f", (1<<31), 9.1); errors += string_check(buf, "9.100000"); /* curl_msnprintf() limits a single float output to 325 bytes maximum width */ curl_msnprintf(buf, sizeof(buf), "%*f", (1<<30), 9.1); errors += string_check(buf, " 9.100000"); curl_msnprintf(buf, sizeof(buf), "%100000f", 9.1); errors += string_check(buf, " 9.100000"); curl_msnprintf(buf, sizeof(buf), "%f", MAXIMIZE); errors += strlen_check(buf, 317); curl_msnprintf(buf, 2, "%f", MAXIMIZE); errors += strlen_check(buf, 1); curl_msnprintf(buf, 3, "%f", MAXIMIZE); errors += strlen_check(buf, 2); curl_msnprintf(buf, 4, "%f", MAXIMIZE); errors += strlen_check(buf, 3); curl_msnprintf(buf, 5, "%f", MAXIMIZE); errors += strlen_check(buf, 4); curl_msnprintf(buf, 6, "%f", MAXIMIZE); errors += strlen_check(buf, 5); if(!errors) printf("All float strings tests OK!\n"); else printf("test_float_formatting Failed!\n"); return errors; } int test(char *URL) { int errors = 0; (void)URL; /* not used */ errors += test_weird_arguments(); errors += test_unsigned_short_formatting(); errors += test_signed_short_formatting(); errors += test_unsigned_int_formatting(); errors += test_signed_int_formatting(); errors += test_unsigned_long_formatting(); errors += test_signed_long_formatting(); errors += test_curl_off_t_formatting(); errors += test_string_formatting(); errors += test_float_formatting(); if(errors) return TEST_ERR_MAJOR_BAD; else 
return 0; }
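/*
 * A minimal sketch of the truncation behavior exercised by the MAXIMIZE
 * cases above, assuming only what those checks assert: curl_msnprintf()
 * NUL-terminates within the given size, so
 *
 *   char tiny[4];
 *   curl_msnprintf(tiny, sizeof(tiny), "%f", 9.1);
 *
 * leaves tiny holding "9.1" -- the first three bytes of "9.100000" plus
 * the terminator, mirroring the strlen_check(buf, n - 1) pattern used
 * with buffer sizes 2 through 6.
 */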
/*
 * card-cac.c: Support for CAC from NIST SP800-73
 * card-default.c: Support for cards with no driver
 *
 * Copyright (C) 2001, 2002  Juha Yrjölä <juha.yrjola@iki.fi>
 * Copyright (C) 2005,2006,2007,2008,2009,2010 Douglas E. Engert <deengert@anl.gov>
 * Copyright (C) 2006, Identity Alliance, Thomas Harning <thomas.harning@identityalliance.com>
 * Copyright (C) 2007, EMC, Russell Larner <rlarner@rsa.com>
 * Copyright (C) 2016 - 2018, Red Hat, Inc.
 *
 * CAC driver author: Robert Relyea <rrelyea@redhat.com>
 * Further work: Jakub Jelen <jjelen@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <ctype.h>
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
#include <io.h>
#else
#include <unistd.h>
#endif

#ifdef ENABLE_OPENSSL
/* openssl only needed for card administration */
#include <openssl/evp.h>
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/rand.h>
#include <openssl/rsa.h>
#endif /* ENABLE_OPENSSL */

#include "internal.h"
#include "simpletlv.h"
#include "cardctl.h"
#ifdef ENABLE_ZLIB
#include "compression.h"
#endif
#include "iso7816.h"

#define CAC_MAX_SIZE 4096	/* arbitrary, just needs to be 'large enough' */

/*
 *  CAC hardware and APDU constants
 */
#define CAC_MAX_CHUNK_SIZE 240
#define CAC_INS_SIGN_DECRYPT		0x42	/* A crypto operation */
#define CAC_INS_READ_FILE		0x52	/* read a TL or V file */
#define CAC_INS_GET_ACR			0x4c
#define CAC_INS_GET_PROPERTIES		0x56
#define CAC_P1_STEP			0x80
#define CAC_P1_FINAL			0x00
#define CAC_FILE_TAG			1
#define CAC_FILE_VALUE			2

/* TAGS in a TL file */
#define CAC_TAG_CERTIFICATE		0x70
#define CAC_TAG_CERTINFO		0x71
#define CAC_TAG_MSCUID			0x72
#define CAC_TAG_CUID			0xF0
#define CAC_TAG_CC_VERSION_NUMBER	0xF1
#define CAC_TAG_GRAMMAR_VERION_NUMBER	0xF2
#define CAC_TAG_CARDURL			0xF3
#define CAC_TAG_PKCS15			0xF4
#define CAC_TAG_ACCESS_CONTROL		0xF6
#define CAC_TAG_DATA_MODEL		0xF5
#define CAC_TAG_CARD_APDU		0xF7
#define CAC_TAG_REDIRECTION		0xFA
#define CAC_TAG_CAPABILITY_TUPLES	0xFB
#define CAC_TAG_STATUS_TUPLES		0xFC
#define CAC_TAG_NEXT_CCC		0xFD
#define CAC_TAG_ERROR_CODES		0xFE
#define CAC_TAG_APPLET_FAMILY		0x01
#define CAC_TAG_NUMBER_APPLETS		0x94
#define CAC_TAG_APPLET_ENTRY		0x93
#define CAC_TAG_APPLET_AID		0x92
#define CAC_TAG_APPLET_INFORMATION	0x01
#define CAC_TAG_NUMBER_OF_OBJECTS	0x40
#define CAC_TAG_TV_BUFFER		0x50
#define CAC_TAG_PKI_OBJECT		0x51
#define CAC_TAG_OBJECT_ID		0x41
#define CAC_TAG_BUFFER_PROPERTIES	0x42
#define CAC_TAG_PKI_PROPERTIES		0x43

#define CAC_APP_TYPE_GENERAL	0x01
#define CAC_APP_TYPE_SKI	0x02
#define CAC_APP_TYPE_PKI	0x04

#define CAC_ACR_ACR		0x00
#define CAC_ACR_APPLET_OBJECT	0x10
#define CAC_ACR_AMP		0x20
#define CAC_ACR_SERVICE		0x21

/* hardware data structures (returned in the CCC) */
/* part of the card_url */
typedef struct cac_access_profile {
	u8 GCACR_listID;
	u8 GCACR_readTagListACRID;
	u8 GCACR_updatevalueACRID;
	u8 GCACR_readvalueACRID;
	u8 GCACR_createACRID;
	u8 GCACR_deleteACRID;
	u8 CryptoACR_listID;
	u8 CryptoACR_getChallengeACRID;
	u8 CryptoACR_internalAuthenicateACRID;
	u8 CryptoACR_pkiComputeACRID;
	u8 CryptoACR_readTagListACRID;
	u8 CryptoACR_updatevalueACRID;
	u8 CryptoACR_readvalueACRID;
	u8 CryptoACR_createACRID;
	u8 CryptoACR_deleteACRID;
} cac_access_profile_t;

/* part of the card url */
typedef struct cac_access_key_info {
	u8 keyFileID[2];
	u8 keynumber;
} cac_access_key_info_t;

typedef struct cac_card_url {
	u8 rid[5];
	u8 cardApplicationType;
	u8 objectID[2];
	u8 applicationID[2];
	cac_access_profile_t accessProfile;
	u8 pinID;				/* not used for VM cards */
	cac_access_key_info_t accessKeyInfo;	/* not used for VM cards */
	u8 keyCryptoAlgorithm;			/* not used for VM cards */
} cac_card_url_t;

typedef struct cac_cuid {
	u8 gsc_rid[5];
	u8 manufacturer_id;
	u8 card_type;
	u8 card_id;
} cac_cuid_t;

/* data structures to store meta data about CAC objects */
typedef struct cac_object {
	const char *name;
	int fd;
	sc_path_t path;
} cac_object_t;

#define CAC_MAX_OBJECTS 16

typedef struct {
	/* OID has two bytes */
	unsigned char oid[2];
	/* Format is NOT SimpleTLV? */
	unsigned char simpletlv;
	/* Is certificate object and private key is initialized */
	unsigned char privatekey;
} cac_properties_object_t;

typedef struct {
	unsigned int num_objects;
	cac_properties_object_t objects[CAC_MAX_OBJECTS];
} cac_properties_t;

/*
 * Flags for Current Selected Object Type
 *   CAC files are TLV files, with TL and V separated. For generic
 *   containers we reintegrate the TL and V portions into a single
 *   file to read. Certs are also TLV files, but pkcs15 wants the
 *   actual certificate. At select time we know the path, which tells
 *   us what type of files we want to read. We remember that type
 *   so that read_binary can do the appropriate processing.
 */
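/*
 * Illustrative example (made-up bytes, not real card data): a generic
 * object whose TL file holds the SimpleTLV headers
 *
 *   TL file: 47 03            (tag 0x47, length 3)
 *   V file:  61 62 63
 *
 * is reassembled by read_binary into the single stream 47 03 61 62 63
 * when the selected object type below is CAC_OBJECT_TYPE_TLV_FILE.
 */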
#define CAC_OBJECT_TYPE_CERT		1
#define CAC_OBJECT_TYPE_TLV_FILE	4
#define CAC_OBJECT_TYPE_GENERIC		5

/*
 * CAC private data per card state
 */
typedef struct cac_private_data {
	int object_type;		/* select set this so we know how to read the file */
	int cert_next;			/* index number for the next certificate found in the list */
	u8 *cache_buf;			/* cached version of the currently selected file */
	size_t cache_buf_len;		/* length of the cached selected file */
	int cached;			/* is the cached selected file valid */
	cac_cuid_t cuid;		/* card unique ID from the CCC */
	u8 *cac_id;			/* card serial number */
	size_t cac_id_len;		/* card serial number len */
	list_t pki_list;		/* list of pki containers */
	cac_object_t *pki_current;	/* current pki object _ctl function */
	list_t general_list;		/* list of general containers */
	cac_object_t *general_current;	/* current object for _ctl function */
	sc_path_t *aca_path;		/* ACA path to be selected before pin verification */
} cac_private_data_t;

#define CAC_DATA(card) ((cac_private_data_t*)card->drv_data)

int cac_list_compare_path(const void *a, const void *b)
{
	if (a == NULL || b == NULL)
		return 1;
	return memcmp( &((cac_object_t *) a)->path,
		&((cac_object_t *) b)->path, sizeof(sc_path_t));
}

/* For SimCList autocopy, we need to know the size of the data elements */
size_t cac_list_meter(const void *el) {
	return sizeof(cac_object_t);
}

static cac_private_data_t *cac_new_private_data(void)
{
	cac_private_data_t *priv;
	priv = calloc(1, sizeof(cac_private_data_t));
	if (!priv)
		return NULL;
	list_init(&priv->pki_list);
	list_attributes_comparator(&priv->pki_list, cac_list_compare_path);
	list_attributes_copy(&priv->pki_list, cac_list_meter, 1);
	list_init(&priv->general_list);
	list_attributes_comparator(&priv->general_list, cac_list_compare_path);
	list_attributes_copy(&priv->general_list, cac_list_meter, 1);
	/* set other fields as appropriate */
	return priv;
}

static void cac_free_private_data(cac_private_data_t *priv)
{
	free(priv->cac_id);
	free(priv->cache_buf);
	free(priv->aca_path);
	list_destroy(&priv->pki_list);
	list_destroy(&priv->general_list);
	free(priv);
	return;
}

static int cac_add_object_to_list(list_t *list, const cac_object_t *object)
{
	if (list_append(list, object) < 0)
		return SC_ERROR_UNKNOWN;
	return SC_SUCCESS;
}

/*
 * Set up the normal CAC paths
 */
#define CAC_TO_AID(x) x, sizeof(x)-1
#define CAC_2_RID "\xA0\x00\x00\x01\x16"
#define CAC_1_RID "\xA0\x00\x00\x00\x79"

static const sc_path_t cac_ACA_Path = {
	"", 0,
	0, 0, SC_PATH_TYPE_DF_NAME,
	{ CAC_TO_AID(CAC_1_RID "\x10\x00") }
};

static const sc_path_t cac_CCC_Path = {
	"", 0,
	0, 0, SC_PATH_TYPE_DF_NAME,
	{ CAC_TO_AID(CAC_2_RID "\xDB\x00") }
};

#define MAX_CAC_SLOTS 16	/* Maximum number of slots is 16 now */

/* default certificate labels for the CAC card */
static const char *cac_labels[MAX_CAC_SLOTS] = {
	"CAC ID Certificate",
	"CAC Email Signature Certificate",
	"CAC Email Encryption Certificate",
	"CAC Cert 4",
	"CAC Cert 5",
	"CAC Cert 6",
	"CAC Cert 7",
	"CAC Cert 8",
	"CAC Cert 9",
	"CAC Cert 10",
	"CAC Cert 11",
	"CAC Cert 12",
	"CAC Cert 13",
	"CAC Cert 14",
	"CAC Cert 15",
	"CAC Cert 16"
};

/* template for a CAC pki object */
static const cac_object_t cac_cac_pki_obj = {
	"CAC Certificate", 0x0, {
		{ 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME,
		{ CAC_TO_AID(CAC_1_RID "\x01\x00") }
	}
};

/* template for emulated cuid */
static const cac_cuid_t cac_cac_cuid = {
	{ 0xa0, 0x00, 0x00, 0x00, 0x79 },
	2, 2, 0
};

/*
 * CAC general objects defined in 4.3.1.2 of CAC Applet Developer Guide Version 1.0.
 * doubles as a source for CAC-2 labels.
*/ static const cac_object_t cac_objects[] = { { "Person Instance", 0x200, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\x00") }}}, { "Personnel", 0x201, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\x01") }}}, { "Benefits", 0x202, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\x02") }}}, { "Other Benefits", 0x203, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\x03") }}}, { "PKI Credential", 0x2FD, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\xFD") }}}, { "PKI Certificate", 0x2FE, { { 0 }, 0, 0, 0, SC_PATH_TYPE_DF_NAME, { CAC_TO_AID(CAC_1_RID "\x02\xFE") }}}, }; static const int cac_object_count = sizeof(cac_objects)/sizeof(cac_objects[0]); /* * use the object id to find our object info on the object in our CAC-1 list */ static const cac_object_t *cac_find_obj_by_id(unsigned short object_id) { int i; for (i = 0; i < cac_object_count; i++) { if (cac_objects[i].fd == object_id) { return &cac_objects[i]; } } return NULL; } /* * Lookup the path in the pki list to see if it is a cert path */ static int cac_is_cert(cac_private_data_t * priv, const sc_path_t *in_path) { cac_object_t test_obj; test_obj.path = *in_path; test_obj.path.index = 0; test_obj.path.count = 0; return (list_contains(&priv->pki_list, &test_obj) != 0); } /* * Send a command and receive data. * * A caller may provide a buffer, and length to read. If not provided, * an internal 4096 byte buffer is used, and a copy is returned to the * caller. that need to be freed by the caller. * * modelled after a similar function in card-piv.c */ static int cac_apdu_io(sc_card_t *card, int ins, int p1, int p2, const u8 * sendbuf, size_t sendbuflen, u8 ** recvbuf, size_t * recvbuflen) { int r; sc_apdu_t apdu; u8 rbufinitbuf[CAC_MAX_SIZE]; u8 *rbuf; size_t rbuflen; unsigned int apdu_case = SC_APDU_CASE_1; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "%02x %02x %02x %"SC_FORMAT_LEN_SIZE_T"u : %"SC_FORMAT_LEN_SIZE_T"u %"SC_FORMAT_LEN_SIZE_T"u\n", ins, p1, p2, sendbuflen, card->max_send_size, card->max_recv_size); rbuf = rbufinitbuf; rbuflen = sizeof(rbufinitbuf); /* if caller provided a buffer and length */ if (recvbuf && *recvbuf && recvbuflen && *recvbuflen) { rbuf = *recvbuf; rbuflen = *recvbuflen; } if (recvbuf) { if (sendbuf) apdu_case = SC_APDU_CASE_4_SHORT; else apdu_case = SC_APDU_CASE_2_SHORT; } else if (sendbuf) apdu_case = SC_APDU_CASE_3_SHORT; sc_format_apdu(card, &apdu, apdu_case, ins, p1, p2); apdu.lc = sendbuflen; apdu.datalen = sendbuflen; apdu.data = sendbuf; if (recvbuf) { apdu.resp = rbuf; apdu.le = (rbuflen > 255) ? 
255 : rbuflen; apdu.resplen = rbuflen; } else { apdu.resp = rbuf; apdu.le = 0; apdu.resplen = 0; } sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "calling sc_transmit_apdu flags=%lx le=%"SC_FORMAT_LEN_SIZE_T"u, resplen=%"SC_FORMAT_LEN_SIZE_T"u, resp=%p", apdu.flags, apdu.le, apdu.resplen, apdu.resp); /* with new adpu.c and chaining, this actually reads the whole object */ r = sc_transmit_apdu(card, &apdu); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "result r=%d apdu.resplen=%"SC_FORMAT_LEN_SIZE_T"u sw1=%02x sw2=%02x", r, apdu.resplen, apdu.sw1, apdu.sw2); if (r < 0) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,"Transmit failed"); goto err; } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r < 0) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Card returned error "); goto err; } if (recvbuflen) { if (recvbuf && *recvbuf == NULL) { *recvbuf = malloc(apdu.resplen); if (*recvbuf == NULL) { r = SC_ERROR_OUT_OF_MEMORY; goto err; } memcpy(*recvbuf, rbuf, apdu.resplen); } *recvbuflen = apdu.resplen; r = *recvbuflen; } err: SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } /* * Get ACR of currently ACA applet identified by the acr_type * 5.3.3.5 Get ACR APDU */ static int cac_get_acr(sc_card_t *card, int acr_type, u8 **out_buf, size_t *out_len) { u8 *out = NULL; /* XXX assuming it will not be longer than 255 B */ size_t len = 256; int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* for simplicity we support only ACR without arguments now */ if (acr_type != 0x00 && acr_type != 0x10 && acr_type != 0x20 && acr_type != 0x21) { return SC_ERROR_INVALID_ARGUMENTS; } r = cac_apdu_io(card, CAC_INS_GET_ACR, acr_type, 0, NULL, 0, &out, &len); if (len == 0) { r = SC_ERROR_FILE_NOT_FOUND; } if (r < 0) goto fail; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "got %"SC_FORMAT_LEN_SIZE_T"u bytes out=%p", len, out); *out_len = len; *out_buf = out; return SC_SUCCESS; fail: if (out) free(out); *out_buf = NULL; *out_len = 0; return r; } /* * Read a CAC TLV file. 
 * Parameters specify if the TLV file is TL (Tag/Length) file or a
 * V (value) file
 */
#define HIGH_BYTE_OF_SHORT(x) (((x) >> 8) & 0xff)
#define LOW_BYTE_OF_SHORT(x)  ((x) & 0xff)

static int cac_read_file(sc_card_t *card, int file_type, u8 **out_buf, size_t *out_len)
{
	u8 params[2];
	u8 count[2];
	u8 *out = NULL;
	u8 *out_ptr;
	size_t offset = 0;
	size_t size = 0;
	size_t left = 0;
	size_t len;
	int r;

	params[0] = file_type;
	params[1] = 2;

	/* get the size */
	len = sizeof(count);
	out_ptr = count;
	r = cac_apdu_io(card, CAC_INS_READ_FILE, 0, 0, &params[0],
			sizeof(params), &out_ptr, &len);
	if (len == 0) {
		r = SC_ERROR_FILE_NOT_FOUND;
	}
	if (r < 0)
		goto fail;
	left = size = lebytes2ushort(count);
	sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,
		 "got %"SC_FORMAT_LEN_SIZE_T"u bytes out_ptr=%p count&=%p count[0]=0x%02x count[1]=0x%02x, len=0x%04"SC_FORMAT_LEN_SIZE_T"x (%"SC_FORMAT_LEN_SIZE_T"u)",
		 len, out_ptr, &count, count[0], count[1], size, size);
	out = out_ptr = malloc(size);
	if (out == NULL) {
		r = SC_ERROR_OUT_OF_MEMORY;
		goto fail;
	}
	for (offset += 2; left > 0;
	     offset += len, left -= len, out_ptr += len) {
		len = MIN(left, CAC_MAX_CHUNK_SIZE);
		params[1] = len;
		r = cac_apdu_io(card, CAC_INS_READ_FILE,
				HIGH_BYTE_OF_SHORT(offset),
				LOW_BYTE_OF_SHORT(offset),
				&params[0], sizeof(params), &out_ptr, &len);
		/* if there is no data, assume there is no file */
		if (len == 0) {
			r = SC_ERROR_FILE_NOT_FOUND;
		}
		if (r < 0) {
			goto fail;
		}
	}
	*out_len = size;
	*out_buf = out;
	return SC_SUCCESS;
fail:
	if (out)
		free(out);
	*out_len = 0;
	return r;
}

/*
 * Callers of this may be expecting a certificate,
 * select file will have saved the object type for us
 * as well as set that we want the cert from the object.
 */
static int cac_read_binary(sc_card_t *card, unsigned int idx,
		unsigned char *buf, size_t count, unsigned long flags)
{
	cac_private_data_t * priv = CAC_DATA(card);
	int r = 0;
	u8 *tl = NULL, *val = NULL;
	u8 *tl_ptr, *val_ptr, *tlv_ptr, *tl_start;
	u8 *cert_ptr;
	size_t tl_len, val_len, tlv_len;
	size_t len, tl_head_len, cert_len;
	u8 cert_type, tag;

	SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);

	/* if we didn't return it all last time, return the remainder */
	if (priv->cached) {
		sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
			 "returning cached value idx=%d count=%"SC_FORMAT_LEN_SIZE_T"u",
			 idx, count);
		if (idx > priv->cache_buf_len) {
			SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_END_REACHED);
		}
		len = MIN(count, priv->cache_buf_len-idx);
		memcpy(buf, &priv->cache_buf[idx], len);
		SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, len);
	}

	sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
		 "clearing cache idx=%d count=%"SC_FORMAT_LEN_SIZE_T"u",
		 idx, count);
	if (priv->cache_buf) {
		free(priv->cache_buf);
		priv->cache_buf = NULL;
		priv->cache_buf_len = 0;
	}

	if (priv->object_type <= 0)
		SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INTERNAL);

	r = cac_read_file(card, CAC_FILE_TAG, &tl, &tl_len);
	if (r < 0) {
		goto done;
	}

	r = cac_read_file(card, CAC_FILE_VALUE, &val, &val_len);
	if (r < 0)
		goto done;

	switch (priv->object_type) {
	case CAC_OBJECT_TYPE_TLV_FILE:
		tlv_len = tl_len + val_len;
		priv->cache_buf = malloc(tlv_len);
		if (priv->cache_buf == NULL) {
			r = SC_ERROR_OUT_OF_MEMORY;
			goto done;
		}
		priv->cache_buf_len = tlv_len;

		for (tl_ptr = tl, val_ptr = val, tlv_ptr = priv->cache_buf;
				tl_len >= 2 && tlv_len > 0;
				val_len -= len, tlv_len -= len,
				val_ptr += len, tlv_ptr += len) {
			/* get the tag and the length */
			tl_start = tl_ptr;
			if (sc_simpletlv_read_tag(&tl_ptr, tl_len, &tag, &len) != SC_SUCCESS)
				break;
			tl_head_len = (tl_ptr - tl_start);
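			/* Re-emit this entry's tag/length header into the
			 * combined buffer; the matching value bytes from the
			 * V file are copied in right below. */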
			/* defensive check: stop rather than write a header
			 * that no longer fits the remaining output space */
			if (sc_simpletlv_put_tag(tag, len, tlv_ptr, tlv_len, &tlv_ptr) != SC_SUCCESS)
				break;
			tlv_len -= tl_head_len;
			tl_len -= tl_head_len;

			/* don't crash on bad data */
			if (val_len < len) {
				len = val_len;
			}
			/* if we run out of return space, truncate */
			if (tlv_len < len) {
				len = tlv_len;
			}
			memcpy(tlv_ptr, val_ptr, len);
		}
		break;
	case CAC_OBJECT_TYPE_CERT:
		/* read file */
		sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
			 " obj= cert_file, val_len=%"SC_FORMAT_LEN_SIZE_T"u (0x%04"SC_FORMAT_LEN_SIZE_T"x)",
			 val_len, val_len);
		cert_len = 0;
		cert_ptr = NULL;
		cert_type = 0;
		for (tl_ptr = tl, val_ptr = val; tl_len >= 2;
				val_len -= len, val_ptr += len, tl_len -= tl_head_len) {
			tl_start = tl_ptr;
			if (sc_simpletlv_read_tag(&tl_ptr, tl_len, &tag, &len) != SC_SUCCESS)
				break;
			tl_head_len = tl_ptr - tl_start;

			/* incomplete value */
			if (val_len < len)
				break;

			if (tag == CAC_TAG_CERTIFICATE) {
				cert_len = len;
				cert_ptr = val_ptr;
			}
			if (tag == CAC_TAG_CERTINFO) {
				if ((len >= 1) && (val_len >= 1)) {
					cert_type = *val_ptr;
				}
			}
			if (tag == CAC_TAG_MSCUID) {
				sc_log_hex(card->ctx, "MSCUID", val_ptr, len);
			}
			if ((val_len < len) || (tl_len < tl_head_len)) {
				break;
			}
		}
		/* if the info byte is 1, then the cert is compressed, decompress it */
		if ((cert_type & 0x3) == 1) {
#ifdef ENABLE_ZLIB
			r = sc_decompress_alloc(&priv->cache_buf, &priv->cache_buf_len,
				cert_ptr, cert_len, COMPRESSION_AUTO);
#else
			sc_log(card->ctx, "CAC compression not supported, no zlib");
			r = SC_ERROR_NOT_SUPPORTED;
#endif
			if (r)
				goto done;
		} else if (cert_len > 0) {
			priv->cache_buf = malloc(cert_len);
			if (priv->cache_buf == NULL) {
				r = SC_ERROR_OUT_OF_MEMORY;
				goto done;
			}
			priv->cache_buf_len = cert_len;
			memcpy(priv->cache_buf, cert_ptr, cert_len);
		} else {
			sc_log(card->ctx, "Can't read zero-length certificate");
			goto done;
		}
		break;
	case CAC_OBJECT_TYPE_GENERIC:
		/* TODO
		 * We have some two buffers in unknown encoding that we
		 * need to present in PKCS#15 layer.
*/ default: /* Unknown object type */ sc_log(card->ctx, "Unknown object type: %x", priv->object_type); r = SC_ERROR_INTERNAL; goto done; } /* OK we've read the data, now copy the required portion out to the callers buffer */ priv->cached = 1; len = MIN(count, priv->cache_buf_len-idx); memcpy(buf, &priv->cache_buf[idx], len); r = len; done: if (tl) free(tl); if (val) free(val); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } /* CAC driver is read only */ static int cac_write_binary(sc_card_t *card, unsigned int idx, const u8 *buf, size_t count, unsigned long flags) { SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_NOT_SUPPORTED); } /* initialize getting a list and return the number of elements in the list */ static int cac_get_init_and_get_count(list_t *list, cac_object_t **entry, int *countp) { *countp = list_size(list); list_iterator_start(list); *entry = list_iterator_next(list); return SC_SUCCESS; } /* finalize the list iterator */ static int cac_final_iterator(list_t *list) { list_iterator_stop(list); return SC_SUCCESS; } /* fill in the obj_info for the current object on the list and advance to the next object */ static int cac_fill_object_info(list_t *list, cac_object_t **entry, sc_pkcs15_data_info_t *obj_info) { memset(obj_info, 0, sizeof(sc_pkcs15_data_info_t)); if (*entry == NULL) { return SC_ERROR_FILE_END_REACHED; } obj_info->path = (*entry)->path; obj_info->path.count = CAC_MAX_SIZE-1; /* read something from the object */ obj_info->id.value[0] = ((*entry)->fd >> 8) & 0xff; obj_info->id.value[1] = (*entry)->fd & 0xff; obj_info->id.len = 2; strncpy(obj_info->app_label, (*entry)->name, SC_PKCS15_MAX_LABEL_SIZE-1); *entry = list_iterator_next(list); return SC_SUCCESS; } static int cac_get_serial_nr_from_CUID(sc_card_t* card, sc_serial_number_t* serial) { cac_private_data_t * priv = CAC_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL); if (card->serialnr.len) { *serial = card->serialnr; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } if (priv->cac_id_len) { serial->len = MIN(priv->cac_id_len, SC_MAX_SERIALNR); memcpy(serial->value, priv->cac_id, priv->cac_id_len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_FILE_NOT_FOUND); } static int cac_get_ACA_path(sc_card_t *card, sc_path_t *path) { cac_private_data_t * priv = CAC_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL); if (priv->aca_path) { *path = *priv->aca_path; } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } static int cac_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr) { cac_private_data_t * priv = CAC_DATA(card); LOG_FUNC_CALLED(card->ctx); sc_log(card->ctx, "cmd=%ld ptr=%p", cmd, ptr); if (priv == NULL) { LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); } switch(cmd) { case SC_CARDCTL_CAC_GET_ACA_PATH: return cac_get_ACA_path(card, (sc_path_t *) ptr); case SC_CARDCTL_GET_SERIALNR: return cac_get_serial_nr_from_CUID(card, (sc_serial_number_t *) ptr); case SC_CARDCTL_CAC_INIT_GET_GENERIC_OBJECTS: return cac_get_init_and_get_count(&priv->general_list, &priv->general_current, (int *)ptr); case SC_CARDCTL_CAC_INIT_GET_CERT_OBJECTS: return cac_get_init_and_get_count(&priv->pki_list, &priv->pki_current, (int *)ptr); case SC_CARDCTL_CAC_GET_NEXT_GENERIC_OBJECT: return cac_fill_object_info(&priv->general_list, &priv->general_current, (sc_pkcs15_data_info_t *)ptr); case SC_CARDCTL_CAC_GET_NEXT_CERT_OBJECT: return 
cac_fill_object_info(&priv->pki_list, &priv->pki_current, (sc_pkcs15_data_info_t *)ptr); case SC_CARDCTL_CAC_FINAL_GET_GENERIC_OBJECTS: return cac_final_iterator(&priv->general_list); case SC_CARDCTL_CAC_FINAL_GET_CERT_OBJECTS: return cac_final_iterator(&priv->pki_list); } LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } static int cac_get_challenge(sc_card_t *card, u8 *rnd, size_t len) { /* CAC requires 8 byte response */ u8 rbuf[8]; u8 *rbufp = &rbuf[0]; size_t out_len = sizeof rbuf; int r; LOG_FUNC_CALLED(card->ctx); r = cac_apdu_io(card, 0x84, 0x00, 0x00, NULL, 0, &rbufp, &out_len); LOG_TEST_RET(card->ctx, r, "Could not get challenge"); if (len < out_len) { out_len = len; } memcpy(rnd, rbuf, out_len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, (int) out_len); } static int cac_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { int r = SC_SUCCESS; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "flags=%08lx op=%d alg=%d algf=%08x algr=%08x kr0=%02x, krfl=%"SC_FORMAT_LEN_SIZE_T"u\n", env->flags, env->operation, env->algorithm, env->algorithm_flags, env->algorithm_ref, env->key_ref[0], env->key_ref_len); if (env->algorithm != SC_ALGORITHM_RSA) { r = SC_ERROR_NO_CARD_SUPPORT; } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, r); } static int cac_restore_security_env(sc_card_t *card, int se_num) { SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } static int cac_rsa_op(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { int r; u8 *outp, *rbuf; size_t rbuflen, outplen; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "datalen=%"SC_FORMAT_LEN_SIZE_T"u outlen=%"SC_FORMAT_LEN_SIZE_T"u\n", datalen, outlen); outp = out; outplen = outlen; /* Not strictly necessary. This code requires the caller to have selected the correct PKI container * and authenticated to that container with the verifyPin command... All of this under the reader lock. 
* The PKCS #15 higher level driver code does all this correctly (it's the same for all cards, just * different sets of APDU's that need to be called), so this call is really a little bit of paranoia */ r = sc_lock(card); if (r != SC_SUCCESS) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); rbuf = NULL; rbuflen = 0; for (; datalen > CAC_MAX_CHUNK_SIZE; data += CAC_MAX_CHUNK_SIZE, datalen -= CAC_MAX_CHUNK_SIZE) { r = cac_apdu_io(card, CAC_INS_SIGN_DECRYPT, CAC_P1_STEP, 0, data, CAC_MAX_CHUNK_SIZE, &rbuf, &rbuflen); if (r < 0) { break; } if (rbuflen != 0) { int n = MIN(rbuflen, outplen); memcpy(outp,rbuf, n); outp += n; outplen -= n; } free(rbuf); rbuf = NULL; rbuflen = 0; } if (r < 0) { goto err; } rbuf = NULL; rbuflen = 0; r = cac_apdu_io(card, CAC_INS_SIGN_DECRYPT, CAC_P1_FINAL, 0, data, datalen, &rbuf, &rbuflen); if (r < 0) { goto err; } if (rbuflen != 0) { int n = MIN(rbuflen, outplen); memcpy(outp,rbuf, n); /*outp += n; unused */ outplen -= n; } free(rbuf); rbuf = NULL; r = outlen-outplen; err: sc_unlock(card); if (r < 0) { sc_mem_clear(out, outlen); } if (rbuf) { free(rbuf); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } static int cac_compute_signature(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, cac_rsa_op(card, data, datalen, out, outlen)); } static int cac_decipher(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, cac_rsa_op(card, data, datalen, out, outlen)); } static int cac_parse_properties_object(sc_card_t *card, u8 type, u8 *data, size_t data_len, cac_properties_object_t *object) { size_t len; u8 *val, *val_end, tag; int parsed = 0; if (data_len < 11) return -1; /* Initilize: non-PKI applet */ object->privatekey = 0; val = data; val_end = data + data_len; for (; val < val_end; val += len) { /* get the tag and the length */ if (sc_simpletlv_read_tag(&val, val_end - val, &tag, &len) != SC_SUCCESS) break; switch (tag) { case CAC_TAG_OBJECT_ID: if (len != 2) { sc_log(card->ctx, "TAG: Object ID: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Object ID = 0x%02x 0x%02x", val[0], val[1]); memcpy(&object->oid, val, 2); parsed++; break; case CAC_TAG_BUFFER_PROPERTIES: if (len != 5) { sc_log(card->ctx, "TAG: Buffer Properties: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } /* First byte is "Type of Tag Supported" */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Buffer Properties: Type of Tag Supported = 0x%02x", val[0]); object->simpletlv = val[0]; parsed++; break; case CAC_TAG_PKI_PROPERTIES: /* 4th byte is "Private Key Initialized" */ if (len != 4) { sc_log(card->ctx, "TAG: PKI Properties: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } if (type != CAC_TAG_PKI_OBJECT) { sc_log(card->ctx, "TAG: PKI Properties outside of PKI Object"); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: PKI Properties: Private Key Initialized = 0x%02x", val[2]); object->privatekey = val[2]; parsed++; break; default: /* ignore tags we don't understand */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Unknown (0x%02x)",tag ); break; } } if (parsed < 2) return SC_ERROR_INVALID_DATA; return SC_SUCCESS; } static int cac_get_properties(sc_card_t *card, cac_properties_t *prop) { u8 *rbuf = NULL; size_t rbuflen = 0, len; u8 *val, *val_end, tag; size_t i = 0; int 
r; prop->num_objects = 0; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); r = cac_apdu_io(card, CAC_INS_GET_PROPERTIES, 0x01, 0x00, NULL, 0, &rbuf, &rbuflen); if (r < 0) return r; val = rbuf; val_end = val + rbuflen; for (; val < val_end; val += len) { /* get the tag and the length */ if (sc_simpletlv_read_tag(&val, val_end - val, &tag, &len) != SC_SUCCESS) break; switch (tag) { case CAC_TAG_APPLET_INFORMATION: if (len != 5) { sc_log(card->ctx, "TAG: Applet Information: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Applet Information: Family: 0x%0x", val[0]); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, " Applet Version: 0x%02x 0x%02x 0x%02x 0x%02x", val[1], val[2], val[3], val[4]); break; case CAC_TAG_NUMBER_OF_OBJECTS: if (len != 1) { sc_log(card->ctx, "TAG: Num objects: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Num objects = %hhd", *val); /* make sure we do not overrun buffer */ prop->num_objects = MIN(val[0], CAC_MAX_OBJECTS); break; case CAC_TAG_TV_BUFFER: if (len != 17) { sc_log(card->ctx, "TAG: TV Object: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: TV Object nr. %"SC_FORMAT_LEN_SIZE_T"u", i); if (i >= CAC_MAX_OBJECTS) { free(rbuf); return SC_SUCCESS; } if (cac_parse_properties_object(card, tag, val, len, &prop->objects[i]) == SC_SUCCESS) i++; break; case CAC_TAG_PKI_OBJECT: if (len != 17) { sc_log(card->ctx, "TAG: PKI Object: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: PKI Object nr. %"SC_FORMAT_LEN_SIZE_T"u", i); if (i >= CAC_MAX_OBJECTS) { free(rbuf); return SC_SUCCESS; } if (cac_parse_properties_object(card, tag, val, len, &prop->objects[i]) == SC_SUCCESS) i++; break; default: /* ignore tags we don't understand */ sc_log(card->ctx, "TAG: Unknown (0x%02x), len=%" SC_FORMAT_LEN_SIZE_T"u", tag, len); break; } } free(rbuf); /* sanity */ if (i != prop->num_objects) sc_log(card->ctx, "The announced number of objects (%u) " "did not match reality (%"SC_FORMAT_LEN_SIZE_T"u)", prop->num_objects, i); prop->num_objects = i; return SC_SUCCESS; } /* * CAC cards use SC_PATH_SELECT_OBJECT_ID rather than SC_PATH_SELECT_FILE_ID. In order to use more * of the PKCS #15 structure, we call the selection SC_PATH_SELECT_FILE_ID, but we set p1 to 2 instead * of 0. Also cac1 does not do any FCI, but it doesn't understand not selecting it. 
It returns invalid INS * if it doesn't like anything about the select, so we always 'request' FCI for CAC1 * * The rest is just copied from iso7816_select_file */ static int cac_select_file_by_type(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out, int type) { struct sc_context *ctx; struct sc_apdu apdu; unsigned char buf[SC_MAX_APDU_BUFFER_SIZE]; unsigned char pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; int r, pathlen, pathtype; struct sc_file *file = NULL; cac_private_data_t * priv = CAC_DATA(card); assert(card != NULL && in_path != NULL); ctx = card->ctx; SC_FUNC_CALLED(ctx, SC_LOG_DEBUG_VERBOSE); memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; pathtype = in_path->type; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "path->aid=%x %x %x %x %x %x %x len=%"SC_FORMAT_LEN_SIZE_T"u, path->value = %x %x %x %x len=%"SC_FORMAT_LEN_SIZE_T"u path->type=%d (%x)", in_path->aid.value[0], in_path->aid.value[1], in_path->aid.value[2], in_path->aid.value[3], in_path->aid.value[4], in_path->aid.value[5], in_path->aid.value[6], in_path->aid.len, in_path->value[0], in_path->value[1], in_path->value[2], in_path->value[3], in_path->len, in_path->type, in_path->type); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "file_out=%p index=%d count=%d\n", file_out, in_path->index, in_path->count); /* Sigh, sc_key_select expects paths to keys to have specific formats. There is no override. * we have to add some bytes to the path to make it happy. A better fix would be to give sc_key_file * a flag that says 'no, really this path is fine'. We only need to do this for private keys */ if ((pathlen > 2) && (pathlen <= 4) && memcmp(path, "\x3F\x00", 2) == 0) { if (pathlen > 2) { path += 2; pathlen -= 2; } } /* CAC has multiple different type of objects that aren't PKCS #15. When we read * them we need convert them to something PKCS #15 would understand. Find the object * and object type here: */ if (priv) { /* don't record anything if we haven't been initialized yet */ priv->object_type = CAC_OBJECT_TYPE_GENERIC; if (cac_is_cert(priv, in_path)) { priv->object_type = CAC_OBJECT_TYPE_CERT; } /* forget any old cached values */ if (priv->cache_buf) { free(priv->cache_buf); priv->cache_buf = NULL; } priv->cache_buf_len = 0; priv->cached = 0; } if (in_path->aid.len) { if (!pathlen) { memcpy(path, in_path->aid.value, in_path->aid.len); pathlen = in_path->aid.len; pathtype = SC_PATH_TYPE_DF_NAME; } else { /* First, select the application */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"select application" ); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xA4, 4, 0); apdu.data = in_path->aid.value; apdu.datalen = in_path->aid.len; apdu.lc = in_path->aid.len; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) LOG_FUNC_RETURN(ctx, r); } } sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0, 0); switch (pathtype) { /* ideally we would had SC_PATH_TYPE_OBJECT_ID and add code to the iso7816 select. * Unfortunately we'd also need to update the caching code as well. For now just * use FILE_ID and change p1 here */ case SC_PATH_TYPE_FILE_ID: apdu.p1 = 2; if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; break; case SC_PATH_TYPE_DF_NAME: apdu.p1 = 4; break; default: LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); } apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = sc_get_max_recv_size(card) < 256 ? 
sc_get_max_recv_size(card) : 256; if (file_out != NULL) { apdu.p2 = 0; /* first record, return FCI */ } else { apdu.p2 = 0x0C; } r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); if (file_out == NULL) { /* For some cards 'SELECT' can be only with request to return FCI/FCP. */ r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (apdu.sw1 == 0x6A && apdu.sw2 == 0x86) { apdu.p2 = 0x00; apdu.resplen = sizeof(buf); if (sc_transmit_apdu(card, &apdu) == SC_SUCCESS) r = sc_check_sw(card, apdu.sw1, apdu.sw2); } if (apdu.sw1 == 0x61) LOG_FUNC_RETURN(ctx, SC_SUCCESS); LOG_FUNC_RETURN(ctx, r); } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) LOG_FUNC_RETURN(ctx, r); /* This needs to come after the applet selection */ if (priv && in_path->len >= 2) { /* get applet properties to know if we can treat the * buffer as SimpleLTV and if we have PKI applet. * * Do this only if we select applets for reading * (not during driver initialization) */ cac_properties_t prop; size_t i = -1; r = cac_get_properties(card, &prop); if (r == SC_SUCCESS) { for (i = 0; i < prop.num_objects; i++) { sc_log(card->ctx, "Searching for our OID: 0x%02x 0x%02x = 0x%02x 0x%02x", prop.objects[i].oid[0], prop.objects[i].oid[1], in_path->value[0], in_path->value[1]); if (memcmp(prop.objects[i].oid, in_path->value, 2) == 0) break; } } if (i < prop.num_objects) { if (prop.objects[i].privatekey) priv->object_type = CAC_OBJECT_TYPE_CERT; else if (prop.objects[i].simpletlv == 0) priv->object_type = CAC_OBJECT_TYPE_TLV_FILE; } } /* CAC cards never return FCI, fake one */ file = sc_file_new(); if (file == NULL) LOG_FUNC_RETURN(ctx, SC_ERROR_OUT_OF_MEMORY); file->path = *in_path; file->size = CAC_MAX_SIZE; /* we don't know how big, just give a large size until we can read the file */ *file_out = file; SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } static int cac_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out) { return cac_select_file_by_type(card, in_path, file_out, card->type); } static int cac_finish(sc_card_t *card) { cac_private_data_t * priv = CAC_DATA(card); SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (priv) { cac_free_private_data(priv); } return SC_SUCCESS; } /* select the Card Capabilities Container on CAC-2 */ static int cac_select_CCC(sc_card_t *card) { return cac_select_file_by_type(card, &cac_CCC_Path, NULL, SC_CARD_TYPE_CAC_II); } /* Select ACA in non-standard location */ static int cac_select_ACA(sc_card_t *card) { return cac_select_file_by_type(card, &cac_ACA_Path, NULL, SC_CARD_TYPE_CAC_II); } static int cac_path_from_cardurl(sc_card_t *card, sc_path_t *path, cac_card_url_t *val, int len) { if (len < 10) { return SC_ERROR_INVALID_DATA; } sc_mem_clear(path, sizeof(sc_path_t)); memcpy(path->aid.value, &val->rid, sizeof(val->rid)); memcpy(&path->aid.value[5], &val->applicationID, sizeof(val->applicationID)); path->aid.len = sizeof(val->rid) + sizeof(val->applicationID); memcpy(path->value, &val->objectID, sizeof(val->objectID)); path->len = sizeof(val->objectID); path->type = SC_PATH_TYPE_FILE_ID; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "path->aid=%x %x %x %x %x %x %x len=%"SC_FORMAT_LEN_SIZE_T"u, path->value = %x %x len=%"SC_FORMAT_LEN_SIZE_T"u path->type=%d (%x)", path->aid.value[0], path->aid.value[1], path->aid.value[2], path->aid.value[3], path->aid.value[4], path->aid.value[5], path->aid.value[6], path->aid.len, path->value[0], path->value[1], path->len, path->type, path->type); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "rid=%x %x %x %x %x 
len=%"SC_FORMAT_LEN_SIZE_T"u appid= %x %x len=%"SC_FORMAT_LEN_SIZE_T"u objid= %x %x len=%"SC_FORMAT_LEN_SIZE_T"u", val->rid[0], val->rid[1], val->rid[2], val->rid[3], val->rid[4], sizeof(val->rid), val->applicationID[0], val->applicationID[1], sizeof(val->applicationID), val->objectID[0], val->objectID[1], sizeof(val->objectID)); return SC_SUCCESS; } static int cac_parse_aid(sc_card_t *card, cac_private_data_t *priv, u8 *aid, int aid_len) { cac_object_t new_object; cac_properties_t prop; size_t i; int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* Search for PKI applets (7 B). Ignore generic objects for now */ if (aid_len != 7 || (memcmp(aid, CAC_1_RID "\x01", 6) != 0 && memcmp(aid, CAC_1_RID "\x00", 6) != 0)) return SC_SUCCESS; sc_mem_clear(&new_object.path, sizeof(sc_path_t)); memcpy(new_object.path.aid.value, aid, aid_len); new_object.path.aid.len = aid_len; /* Call without OID set will just select the AID without subseqent * OID selection, which we need to figure out just now */ cac_select_file_by_type(card, &new_object.path, NULL, SC_CARD_TYPE_CAC_II); r = cac_get_properties(card, &prop); if (r < 0) return SC_ERROR_INTERNAL; for (i = 0; i < prop.num_objects; i++) { /* don't fail just because we have more certs than we can support */ if (priv->cert_next >= MAX_CAC_SLOTS) return SC_SUCCESS; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "ACA: pki_object found, cert_next=%d (%s), privkey=%d", priv->cert_next, cac_labels[priv->cert_next], prop.objects[i].privatekey); /* If the private key is not initialized, we can safely * ignore this object here, but increase the pointer to follow * the certificate labels */ if (!prop.objects[i].privatekey) { priv->cert_next++; continue; } /* OID here has always 2B */ memcpy(new_object.path.value, &prop.objects[i].oid, 2); new_object.path.len = 2; new_object.path.type = SC_PATH_TYPE_FILE_ID; new_object.name = cac_labels[priv->cert_next]; new_object.fd = priv->cert_next+1; cac_add_object_to_list(&priv->pki_list, &new_object); priv->cert_next++; } return SC_SUCCESS; } static int cac_parse_cardurl(sc_card_t *card, cac_private_data_t *priv, cac_card_url_t *val, int len) { cac_object_t new_object; const cac_object_t *obj; unsigned short object_id; int r; r = cac_path_from_cardurl(card, &new_object.path, val, len); if (r != SC_SUCCESS) { return r; } switch (val->cardApplicationType) { case CAC_APP_TYPE_PKI: /* we don't want to overflow the cac_label array. This test could * go way if we create a label function that will create a unique label * from a cert index. 
*/ if (priv->cert_next >= MAX_CAC_SLOTS) break; /* don't fail just because we have more certs than we can support */ new_object.name = cac_labels[priv->cert_next]; new_object.fd = priv->cert_next+1; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"CARDURL: pki_object found, cert_next=%d (%s),", priv->cert_next, new_object.name); cac_add_object_to_list(&priv->pki_list, &new_object); priv->cert_next++; break; case CAC_APP_TYPE_GENERAL: object_id = bebytes2ushort(val->objectID); obj = cac_find_obj_by_id(object_id); if (obj == NULL) break; /* don't fail just because we don't recognize the object */ new_object.name = obj->name; new_object.fd = 0; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"CARDURL: gen_object found, objectID=%x (%s),", object_id, new_object.name); cac_add_object_to_list(&priv->general_list, &new_object); break; case CAC_APP_TYPE_SKI: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"CARDURL: ski_object found"); break; default: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"CARDURL: unknown object_object found (type=0x%02x)", val->cardApplicationType); /* don't fail just because there is an unknown object in the CCC */ break; } return SC_SUCCESS; } static int cac_parse_cuid(sc_card_t *card, cac_private_data_t *priv, cac_cuid_t *val, size_t len) { size_t card_id_len; if (len < sizeof(cac_cuid_t)) { return SC_ERROR_INVALID_DATA; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "gsc_rid=%s", sc_dump_hex(val->gsc_rid, sizeof(val->gsc_rid))); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "manufacture id=%x", val->manufacturer_id); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "cac_type=%d", val->card_type); card_id_len = len - (&val->card_id - (u8 *)val); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "card_id=%s (%"SC_FORMAT_LEN_SIZE_T"u)", sc_dump_hex(&val->card_id, card_id_len), card_id_len); priv->cuid = *val; priv->cac_id = malloc(card_id_len); if (priv->cac_id == NULL) { return SC_ERROR_OUT_OF_MEMORY; } memcpy(priv->cac_id, &val->card_id, card_id_len); priv->cac_id_len = card_id_len; return SC_SUCCESS; } static int cac_process_CCC(sc_card_t *card, cac_private_data_t *priv); static int cac_parse_CCC(sc_card_t *card, cac_private_data_t *priv, u8 *tl, size_t tl_len, u8 *val, size_t val_len) { size_t len = 0; u8 *tl_end = tl + tl_len; u8 *val_end = val + val_len; sc_path_t new_path; int r; for (; (tl < tl_end) && (val< val_end); val += len) { /* get the tag and the length */ u8 tag; if (sc_simpletlv_read_tag(&tl, tl_end - tl, &tag, &len) != SC_SUCCESS) break; switch (tag) { case CAC_TAG_CUID: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG:CUID"); r = cac_parse_cuid(card, priv, (cac_cuid_t *)val, len); if (r < 0) return r; break; case CAC_TAG_CC_VERSION_NUMBER: if (len != 1) { sc_log(card->ctx, "TAG: CC Version: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } /* ignore the version numbers for now */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: CC Version = 0x%02x", *val); break; case CAC_TAG_GRAMMAR_VERION_NUMBER: if (len != 1) { sc_log(card->ctx, "TAG: Grammar Version: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } /* ignore the version numbers for now */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Grammar Version = 0x%02x", *val); break; case CAC_TAG_CARDURL: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG:CARDURL"); r = cac_parse_cardurl(card, priv, (cac_card_url_t *)val, len); if (r < 0) return r; break; /* * The following are really for file systems cards. 
This code only cares about CAC VM cards */ case CAC_TAG_PKCS15: if (len != 1) { sc_log(card->ctx, "TAG: PKCS15: " "Invalid length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } /* TODO should verify that this is '0'. If it's not * zero, we should drop out of here and let the PKCS 15 * code handle this card */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG: PKCS15 = 0x%02x", *val); break; case CAC_TAG_DATA_MODEL: case CAC_TAG_CARD_APDU: case CAC_TAG_CAPABILITY_TUPLES: case CAC_TAG_STATUS_TUPLES: case CAC_TAG_REDIRECTION: case CAC_TAG_ERROR_CODES: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG:FSSpecific(0x%02x)", tag); break; case CAC_TAG_ACCESS_CONTROL: /* TODO handle access control later */ sc_log_hex(card->ctx, "TAG:ACCESS Control", val, len); break; case CAC_TAG_NEXT_CCC: sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG:NEXT CCC"); r = cac_path_from_cardurl(card, &new_path, (cac_card_url_t *)val, len); if (r < 0) return r; r = cac_select_file_by_type(card, &new_path, NULL, SC_CARD_TYPE_CAC_II); if (r < 0) return r; r = cac_process_CCC(card, priv); if (r < 0) return r; break; default: /* ignore tags we don't understand */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"TAG:Unknown (0x%02x)",tag ); break; } } return SC_SUCCESS; } static int cac_process_CCC(sc_card_t *card, cac_private_data_t *priv) { u8 *tl = NULL, *val = NULL; size_t tl_len, val_len; int r; r = cac_read_file(card, CAC_FILE_TAG, &tl, &tl_len); if (r < 0) goto done; r = cac_read_file(card, CAC_FILE_VALUE, &val, &val_len); if (r < 0) goto done; r = cac_parse_CCC(card, priv, tl, tl_len, val, val_len); done: if (tl) free(tl); if (val) free(val); return r; } /* Service Applet Table (Table 5-21) should list all the applets on the * card, which is a good start if we don't have CCC */ static int cac_parse_ACA_service(sc_card_t *card, cac_private_data_t *priv, u8 *val, size_t val_len) { size_t len = 0; u8 *val_end = val + val_len; int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); for (; val < val_end; val += len) { /* get the tag and the length */ u8 tag; if (sc_simpletlv_read_tag(&val, val_end - val, &tag, &len) != SC_SUCCESS) break; switch (tag) { case CAC_TAG_APPLET_FAMILY: if (len != 5) { sc_log(card->ctx, "TAG: Applet Information: " "bad length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Applet Information: Family: 0x%02x", val[0]); sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, " Applet Version: 0x%02x 0x%02x 0x%02x 0x%02x", val[1], val[2], val[3], val[4]); break; case CAC_TAG_NUMBER_APPLETS: if (len != 1) { sc_log(card->ctx, "TAG: Num applets: " "bad length %"SC_FORMAT_LEN_SIZE_T"u", len); break; } sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Num applets = %hhd", *val); break; case CAC_TAG_APPLET_ENTRY: /* Make sure we match the outer length */ if (len < 3 || val[2] != len - 3) { sc_log(card->ctx, "TAG: Applet Entry: " "bad length (%"SC_FORMAT_LEN_SIZE_T "u) or length of internal buffer", len); break; } sc_debug_hex(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Applet Entry: AID", &val[3], val[2]); /* This is SimpleTLV prefixed with applet ID (1B) */ r = cac_parse_aid(card, priv, &val[3], val[2]); if (r < 0) return r; break; default: /* ignore tags we don't understand */ sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "TAG: Unknown (0x%02x)", tag); break; } } return SC_SUCCESS; } /* select a CAC pki applet by index */ static int cac_select_pki_applet(sc_card_t *card, int index) { sc_path_t applet_path = cac_cac_pki_obj.path; applet_path.aid.value[applet_path.aid.len-1] = index; return 
cac_select_file_by_type(card, &applet_path, NULL, SC_CARD_TYPE_CAC_II); } /* * Find the first existing CAC applet. If none found, then this isn't a CAC */ static int cac_find_first_pki_applet(sc_card_t *card, int *index_out) { int r, i; for (i = 0; i < MAX_CAC_SLOTS; i++) { r = cac_select_pki_applet(card, i); if (r == SC_SUCCESS) { /* Try to read first two bytes of the buffer to * make sure it is not just malfunctioning card */ u8 params[2] = {CAC_FILE_TAG, 2}; u8 data[2], *out_ptr = data; size_t len = 2; r = cac_apdu_io(card, CAC_INS_READ_FILE, 0, 0, &params[0], sizeof(params), &out_ptr, &len); if (r != 2) continue; *index_out = i; return SC_SUCCESS; } } return SC_ERROR_OBJECT_NOT_FOUND; } /* * This emulates CCC for Alt tokens, that do not come with CCC nor ACA applets */ static int cac_populate_cac_alt(sc_card_t *card, int index, cac_private_data_t *priv) { int r, i; cac_object_t pki_obj = cac_cac_pki_obj; u8 buf[100]; u8 *val; size_t val_len; /* populate PKI objects */ for (i = index; i < MAX_CAC_SLOTS; i++) { r = cac_select_pki_applet(card, i); if (r == SC_SUCCESS) { pki_obj.name = cac_labels[i]; sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "CAC: pki_object found, cert_next=%d (%s),", i, pki_obj.name); pki_obj.path.aid.value[pki_obj.path.aid.len-1] = i; pki_obj.fd = i+1; /* don't use id of zero */ cac_add_object_to_list(&priv->pki_list, &pki_obj); } } /* populate non-PKI objects */ for (i=0; i < cac_object_count; i++) { r = cac_select_file_by_type(card, &cac_objects[i].path, NULL, SC_CARD_TYPE_CAC_II); if (r == SC_SUCCESS) { sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "CAC: obj_object found, cert_next=%d (%s),", i, cac_objects[i].name); cac_add_object_to_list(&priv->general_list, &cac_objects[i]); } } /* * create a cuid to simulate the cac 2 cuid. */ priv->cuid = cac_cac_cuid; /* create a serial number by hashing the first 100 bytes of the * first certificate on the card */ r = cac_select_pki_applet(card, index); if (r < 0) { return r; /* shouldn't happen unless the card has been removed or is malfunctioning */ } val = buf; val_len = cac_read_binary(card, 0, val, sizeof(buf), 0); if (val_len > 0) { priv->cac_id = malloc(20); if (priv->cac_id == NULL) { return SC_ERROR_OUT_OF_MEMORY; } #ifdef ENABLE_OPENSSL SHA1(val, val_len, priv->cac_id); priv->cac_id_len = 20; sc_debug_hex(card->ctx, SC_LOG_DEBUG_VERBOSE, "cuid", priv->cac_id, priv->cac_id_len); #else sc_log(card->ctx, "OpenSSL Required"); return SC_ERROR_NOT_SUPPORTED; #endif /* ENABLE_OPENSSL */ } return SC_SUCCESS; } static int cac_process_ACA(sc_card_t *card, cac_private_data_t *priv) { int r; u8 *val = NULL; size_t val_len; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* Assuming ACA is already selected */ r = cac_get_acr(card, CAC_ACR_SERVICE, &val, &val_len); if (r < 0) goto done; r = cac_parse_ACA_service(card, priv, val, val_len); if (r == SC_SUCCESS) { priv->aca_path = malloc(sizeof(sc_path_t)); if (!priv->aca_path) { r = SC_ERROR_OUT_OF_MEMORY; goto done; } memcpy(priv->aca_path, &cac_ACA_Path, sizeof(sc_path_t)); } done: if (val) free(val); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } /* * Look for a CAC card. If it exists, initialize our data structures */ static int cac_find_and_initialize(sc_card_t *card, int initialize) { int r, index; cac_private_data_t *priv = NULL; /* already initialized? 
*/ if (card->drv_data) { return SC_SUCCESS; } /* is this a CAC-2 specified in NIST Interagency Report 6887 - * "Government Smart Card Interoperability Specification v2.1 July 2003" */ r = cac_select_CCC(card); if (r == SC_SUCCESS) { sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "CCC found, is CAC-2"); if (!initialize) /* match card only */ return r; priv = cac_new_private_data(); if (!priv) return SC_ERROR_OUT_OF_MEMORY; r = cac_process_CCC(card, priv); if (r == SC_SUCCESS) { card->type = SC_CARD_TYPE_CAC_II; card->drv_data = priv; return r; } } /* Even some ALT tokens can be missing the CCC, so we should try the ACA */ r = cac_select_ACA(card); if (r == SC_SUCCESS) { sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "ACA found, is CAC-2 without CCC"); if (!initialize) /* match card only */ return r; if (!priv) { priv = cac_new_private_data(); if (!priv) return SC_ERROR_OUT_OF_MEMORY; } r = cac_process_ACA(card, priv); if (r == SC_SUCCESS) { card->type = SC_CARD_TYPE_CAC_II; card->drv_data = priv; return r; } } /* is this a CAC Alt token without any accompanying structures? */ r = cac_find_first_pki_applet(card, &index); if (r == SC_SUCCESS) { sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE, "PKI applet found, is bare CAC Alt"); if (!initialize) /* match card only */ return r; if (!priv) { priv = cac_new_private_data(); if (!priv) return SC_ERROR_OUT_OF_MEMORY; } card->drv_data = priv; /* needed for the read_binary() */ r = cac_populate_cac_alt(card, index, priv); if (r == SC_SUCCESS) { card->type = SC_CARD_TYPE_CAC_II; return r; } card->drv_data = NULL; /* reset on failure */ } if (priv) { cac_free_private_data(priv); } return r; } /* NOTE: returns a bool, 1 if the card matches, 0 if it does not */ static int cac_match_card(sc_card_t *card) { int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); /* Since we send an APDU, the card's logout function may be called... * however it may be in dirty memory */ card->ops->logout = NULL; r = cac_find_and_initialize(card, 0); return (r == SC_SUCCESS); /* 1 on match, 0 otherwise */ } static int cac_init(sc_card_t *card) { int r; unsigned long flags; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); r = cac_find_and_initialize(card, 1); if (r < 0) { SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_CARD); } flags = SC_ALGORITHM_RSA_RAW; _sc_card_add_rsa_alg(card, 1024, flags, 0); /* mandatory */ _sc_card_add_rsa_alg(card, 2048, flags, 0); /* optional */ _sc_card_add_rsa_alg(card, 3072, flags, 0); /* optional */ card->caps |= SC_CARD_CAP_RNG | SC_CARD_CAP_ISO7816_PIN_INFO; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_SUCCESS); } static int cac_pin_cmd(sc_card_t *card, struct sc_pin_cmd_data *data, int *tries_left) { /* CAC, like PIV, needs extra validation of the (new) PIN during * a PIN change request, to ensure it's not outside the * FIPS 201 4.1.6.1 (numeric only) and * FIPS 140-2 * (6 character minimum) requirements.
*/ struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (data->cmd == SC_PIN_CMD_CHANGE) { int i = 0; if (data->pin2.len < 6) { return SC_ERROR_INVALID_PIN_LENGTH; } for(i=0; i < data->pin2.len; ++i) { if (!isdigit(data->pin2.data[i])) { return SC_ERROR_INVALID_DATA; } } } return iso_drv->ops->pin_cmd(card, data, tries_left); } static struct sc_card_operations cac_ops; static struct sc_card_driver cac_drv = { "Common Access Card (CAC)", "cac", &cac_ops, NULL, 0, NULL }; static struct sc_card_driver * sc_get_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); cac_ops = *iso_drv->ops; cac_ops.match_card = cac_match_card; cac_ops.init = cac_init; cac_ops.finish = cac_finish; cac_ops.select_file = cac_select_file; /* need to record object type */ cac_ops.get_challenge = cac_get_challenge; cac_ops.read_binary = cac_read_binary; cac_ops.write_binary = cac_write_binary; cac_ops.set_security_env = cac_set_security_env; cac_ops.restore_security_env = cac_restore_security_env; cac_ops.compute_signature = cac_compute_signature; cac_ops.decipher = cac_decipher; cac_ops.card_ctl = cac_card_ctl; cac_ops.pin_cmd = cac_pin_cmd; return &cac_drv; } struct sc_card_driver * sc_get_cac_driver(void) { return sc_get_driver(); }
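/*
 * Illustrative sketch, not part of the driver above: the registration
 * pattern used by sc_get_driver() -- clone the ISO7816 ops table, then
 * override only the entries that differ. The mycard_* names below are
 * hypothetical placeholders; only sc_get_iso7816_driver() and the
 * sc_card_driver/sc_card_operations types come from the surrounding code.
 *
 *   static int mycard_match_card(sc_card_t *card) { return 0; }
 *   static int mycard_init(sc_card_t *card) { return SC_SUCCESS; }
 *
 *   static struct sc_card_operations mycard_ops;
 *   static struct sc_card_driver mycard_drv = {
 *       "My Card", "mycard", &mycard_ops, NULL, 0, NULL
 *   };
 *
 *   static struct sc_card_driver *sc_get_mycard_driver(void)
 *   {
 *       struct sc_card_driver *iso_drv = sc_get_iso7816_driver();
 *       mycard_ops = *iso_drv->ops;        // start from the ISO7816 defaults
 *       mycard_ops.match_card = mycard_match_card;
 *       mycard_ops.init = mycard_init;     // override only what differs
 *       return &mycard_drv;
 *   }
 */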
./CrossVul/dataset_final_sorted/CWE-119/c/bad_345_0
crossvul-cpp_data_bad_292_0
/* Pango * pango-emoji.c: Emoji handling * * Copyright (C) 2017 Google, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Implementation of pango_emoji_iter is derived from Chromium: * * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/fonts/FontFallbackPriority.h * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/text/CharacterEmoji.cpp * https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/fonts/SymbolsIterator.cpp * * // Copyright 2015 The Chromium Authors. All rights reserved. * // Use of this source code is governed by a BSD-style license that can be * // found in the LICENSE file. */ #include "config.h" #include <stdlib.h> #include <string.h> #include "pango-emoji-private.h" #include "pango-emoji-table.h" static int interval_compare (const void *key, const void *elt) { gunichar c = GPOINTER_TO_UINT (key); struct Interval *interval = (struct Interval *)elt; if (c < interval->start) return -1; if (c > interval->end) return +1; return 0; } #define DEFINE_pango_Is_(name) \ static gboolean \ _pango_Is_##name (gunichar ch) \ { \ /* bsearch() is declared attribute(nonnull(1)) so we can't validly search \ * for a NULL key */ \ /* \ if (G_UNLIKELY (ch == 0)) \ return FALSE; \ */ \ \ if (bsearch (GUINT_TO_POINTER (ch), \ _pango_##name##_table, \ G_N_ELEMENTS (_pango_##name##_table), \ sizeof _pango_##name##_table[0], \ interval_compare)) \ return TRUE; \ \ return FALSE; \ } DEFINE_pango_Is_(Emoji) DEFINE_pango_Is_(Emoji_Presentation) DEFINE_pango_Is_(Emoji_Modifier) DEFINE_pango_Is_(Emoji_Modifier_Base) static gboolean _pango_Is_Emoji_Text_Default (gunichar ch) { return _pango_Is_Emoji (ch) && !_pango_Is_Emoji_Presentation (ch); } static gboolean _pango_Is_Emoji_Emoji_Default (gunichar ch) { return _pango_Is_Emoji_Presentation (ch); } static gboolean _pango_Is_Emoji_Keycap_Base (gunichar ch) { return (ch >= '0' && ch <= '9') || ch == '#' || ch == '*'; } static gboolean _pango_Is_Regional_Indicator (gunichar ch) { return (ch >= 0x1F1E6 && ch <= 0x1F1FF); } const gunichar kCombiningEnclosingCircleBackslashCharacter = 0x20E0; const gunichar kCombiningEnclosingKeycapCharacter = 0x20E3; const gunichar kEyeCharacter = 0x1F441; const gunichar kFemaleSignCharacter = 0x2640; const gunichar kLeftSpeechBubbleCharacter = 0x1F5E8; const gunichar kMaleSignCharacter = 0x2642; const gunichar kRainbowCharacter = 0x1F308; const gunichar kStaffOfAesculapiusCharacter = 0x2695; const gunichar kVariationSelector15Character = 0xFE0E; const gunichar kVariationSelector16Character = 0xFE0F; const gunichar kWavingWhiteFlagCharacter = 0x1F3F3; const gunichar kZeroWidthJoinerCharacter = 0x200D; typedef enum { PANGO_EMOJI_TYPE_INVALID, PANGO_EMOJI_TYPE_TEXT, /* For regular non-symbols text */ PANGO_EMOJI_TYPE_EMOJI_TEXT, /* For emoji in text presentation */ PANGO_EMOJI_TYPE_EMOJI_EMOJI
/* For emoji in emoji presentation */ } PangoEmojiType; static PangoEmojiType _pango_get_emoji_type (gunichar codepoint) { /* Those should only be Emoji presentation as combinations of two. */ if (_pango_Is_Emoji_Keycap_Base (codepoint) || _pango_Is_Regional_Indicator (codepoint)) return PANGO_EMOJI_TYPE_TEXT; if (codepoint == kCombiningEnclosingKeycapCharacter) return PANGO_EMOJI_TYPE_EMOJI_EMOJI; if (_pango_Is_Emoji_Emoji_Default (codepoint) || _pango_Is_Emoji_Modifier_Base (codepoint) || _pango_Is_Emoji_Modifier (codepoint)) return PANGO_EMOJI_TYPE_EMOJI_EMOJI; if (_pango_Is_Emoji_Text_Default (codepoint)) return PANGO_EMOJI_TYPE_EMOJI_TEXT; return PANGO_EMOJI_TYPE_TEXT; } PangoEmojiIter * _pango_emoji_iter_init (PangoEmojiIter *iter, const char *text, int length) { iter->text_start = text; if (length >= 0) iter->text_end = text + length; else iter->text_end = text + strlen (text); iter->start = text; iter->end = text; iter->is_emoji = (gboolean) 2; /* HACK */ _pango_emoji_iter_next (iter); return iter; } void _pango_emoji_iter_fini (PangoEmojiIter *iter) { } #define PANGO_EMOJI_TYPE_IS_EMOJI(typ) ((typ) == PANGO_EMOJI_TYPE_EMOJI_EMOJI) gboolean _pango_emoji_iter_next (PangoEmojiIter *iter) { PangoEmojiType current_emoji_type = PANGO_EMOJI_TYPE_INVALID; if (iter->end == iter->text_end) return FALSE; iter->start = iter->end; for (; iter->end < iter->text_end; iter->end = g_utf8_next_char (iter->end)) { gunichar ch = g_utf8_get_char (iter->end); /* Except at the beginning, ZWJ just carries over the emoji or neutral * text type, VS15 & VS16 we just carry over as well, since we already * resolved those through lookahead. Also, don't downgrade to text * presentation for emoji that are part of a ZWJ sequence, example * U+1F441 U+200D U+1F5E8, eye (text presentation) + ZWJ + left speech * bubble, see below. */ if ((!(ch == kZeroWidthJoinerCharacter && !iter->is_emoji) && ch != kVariationSelector15Character && ch != kVariationSelector16Character && ch != kCombiningEnclosingCircleBackslashCharacter && !_pango_Is_Regional_Indicator(ch) && !((ch == kLeftSpeechBubbleCharacter || ch == kRainbowCharacter || ch == kMaleSignCharacter || ch == kFemaleSignCharacter || ch == kStaffOfAesculapiusCharacter) && !iter->is_emoji)) || current_emoji_type == PANGO_EMOJI_TYPE_INVALID) { current_emoji_type = _pango_get_emoji_type (ch); } if (g_utf8_next_char (iter->end) < iter->text_end) /* Optimize. */ { gunichar peek_char = g_utf8_get_char (g_utf8_next_char (iter->end)); /* Variation Selectors */ if (current_emoji_type == PANGO_EMOJI_TYPE_EMOJI_EMOJI && peek_char == kVariationSelector15Character) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_TEXT; } if ((current_emoji_type == PANGO_EMOJI_TYPE_EMOJI_TEXT || _pango_Is_Emoji_Keycap_Base(ch)) && peek_char == kVariationSelector16Character) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } /* Combining characters Keycap... */ if (_pango_Is_Emoji_Keycap_Base(ch) && peek_char == kCombiningEnclosingKeycapCharacter) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; }; /* Regional indicators */ if (_pango_Is_Regional_Indicator(ch) && _pango_Is_Regional_Indicator(peek_char)) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } /* Upgrade text presentation emoji to emoji presentation when followed by * ZWJ, Example U+1F441 U+200D U+1F5E8, eye + ZWJ + left speech bubble. 
*/ if ((ch == kEyeCharacter || ch == kWavingWhiteFlagCharacter) && peek_char == kZeroWidthJoinerCharacter) { current_emoji_type = PANGO_EMOJI_TYPE_EMOJI_EMOJI; } } if (iter->is_emoji == (gboolean) 2) iter->is_emoji = !PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); if (iter->is_emoji == PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type)) { iter->is_emoji = !PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); return TRUE; } } iter->is_emoji = PANGO_EMOJI_TYPE_IS_EMOJI (current_emoji_type); return TRUE; } /********************************************************** * End of code from Chromium **********************************************************/
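/*
 * Illustrative usage sketch, assuming only the iterator API defined above
 * (_pango_emoji_iter_init/_next/_fini) and the PangoEmojiIter fields this
 * file manipulates (start, end, is_emoji); process_run() is a hypothetical
 * callback. Note that _init() already computes the first run, so the loop
 * body executes once before the first call to _next():
 *
 *   PangoEmojiIter iter;
 *   _pango_emoji_iter_init (&iter, text, -1);      // -1: NUL-terminated
 *   do {
 *     // [iter.start, iter.end) is a maximal run that is uniformly
 *     // emoji (iter.is_emoji) or non-emoji text
 *     process_run (iter.start, iter.end, iter.is_emoji);
 *   } while (_pango_emoji_iter_next (&iter));
 *   _pango_emoji_iter_fini (&iter);
 */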
./CrossVul/dataset_final_sorted/CWE-119/c/bad_292_0
crossvul-cpp_data_good_5766_0
/* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the NetFilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Peter Kese <peter.kese@ijs.si> * Julian Anastasov <ja@ssi.bg> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/workqueue.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/mutex.h> #include <net/net_namespace.h> #include <net/ip.h> #ifdef CONFIG_IP_VS_IPV6 #include <net/ipv6.h> #include <net/ip6_route.h> #endif #include <net/route.h> #include <net/sock.h> #include <net/genetlink.h> #include <asm/uaccess.h> #include <net/ip_vs.h> /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ static DEFINE_MUTEX(__ip_vs_mutex); /* lock for service table */ static DEFINE_RWLOCK(__ip_vs_svc_lock); /* lock for table with the real services */ static DEFINE_RWLOCK(__ip_vs_rs_lock); /* lock for state and timeout tables */ static DEFINE_RWLOCK(__ip_vs_securetcp_lock); /* lock for drop entry handling */ static DEFINE_SPINLOCK(__ip_vs_dropentry_lock); /* lock for drop packet handling */ static DEFINE_SPINLOCK(__ip_vs_droppacket_lock); /* 1/rate drop and drop-entry variables */ int ip_vs_drop_rate = 0; int ip_vs_drop_counter = 0; static atomic_t ip_vs_dropentry = ATOMIC_INIT(0); /* number of virtual services */ static int ip_vs_num_services = 0; /* sysctl variables */ static int sysctl_ip_vs_drop_entry = 0; static int sysctl_ip_vs_drop_packet = 0; static int sysctl_ip_vs_secure_tcp = 0; static int sysctl_ip_vs_amemthresh = 1024; static int sysctl_ip_vs_am_droprate = 10; int sysctl_ip_vs_cache_bypass = 0; int sysctl_ip_vs_expire_nodest_conn = 0; int sysctl_ip_vs_expire_quiescent_template = 0; int sysctl_ip_vs_sync_threshold[2] = { 3, 50 }; int sysctl_ip_vs_nat_icmp_send = 0; #ifdef CONFIG_IP_VS_DEBUG static int sysctl_ip_vs_debug_level = 0; int ip_vs_get_debug_level(void) { return sysctl_ip_vs_debug_level; } #endif #ifdef CONFIG_IP_VS_IPV6 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? 
*/ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr) { struct rt6_info *rt; struct flowi fl = { .oif = 0, .nl_u = { .ip6_u = { .daddr = *addr, .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } }, }; rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK)) return 1; return 0; } #endif /* * update_defense_level is called from keventd and from sysctl, * so it needs to protect itself from softirqs */ static void update_defense_level(void) { struct sysinfo i; static int old_secure_tcp = 0; int availmem; int nomem; int to_change = -1; /* we only count free and buffered memory (in pages) */ si_meminfo(&i); availmem = i.freeram + i.bufferram; /* however in linux 2.5 the i.bufferram is total page cache size, we need to adjust it */ /* si_swapinfo(&i); */ /* availmem = availmem - (i.totalswap - i.freeswap); */ nomem = (availmem < sysctl_ip_vs_amemthresh); local_bh_disable(); /* drop_entry */ spin_lock(&__ip_vs_dropentry_lock); switch (sysctl_ip_vs_drop_entry) { case 0: atomic_set(&ip_vs_dropentry, 0); break; case 1: if (nomem) { atomic_set(&ip_vs_dropentry, 1); sysctl_ip_vs_drop_entry = 2; } else { atomic_set(&ip_vs_dropentry, 0); } break; case 2: if (nomem) { atomic_set(&ip_vs_dropentry, 1); } else { atomic_set(&ip_vs_dropentry, 0); sysctl_ip_vs_drop_entry = 1; }; break; case 3: atomic_set(&ip_vs_dropentry, 1); break; } spin_unlock(&__ip_vs_dropentry_lock); /* drop_packet */ spin_lock(&__ip_vs_droppacket_lock); switch (sysctl_ip_vs_drop_packet) { case 0: ip_vs_drop_rate = 0; break; case 1: if (nomem) { ip_vs_drop_rate = ip_vs_drop_counter = sysctl_ip_vs_amemthresh / (sysctl_ip_vs_amemthresh-availmem); sysctl_ip_vs_drop_packet = 2; } else { ip_vs_drop_rate = 0; } break; case 2: if (nomem) { ip_vs_drop_rate = ip_vs_drop_counter = sysctl_ip_vs_amemthresh / (sysctl_ip_vs_amemthresh-availmem); } else { ip_vs_drop_rate = 0; sysctl_ip_vs_drop_packet = 1; } break; case 3: ip_vs_drop_rate = sysctl_ip_vs_am_droprate; break; } spin_unlock(&__ip_vs_droppacket_lock); /* secure_tcp */ write_lock(&__ip_vs_securetcp_lock); switch (sysctl_ip_vs_secure_tcp) { case 0: if (old_secure_tcp >= 2) to_change = 0; break; case 1: if (nomem) { if (old_secure_tcp < 2) to_change = 1; sysctl_ip_vs_secure_tcp = 2; } else { if (old_secure_tcp >= 2) to_change = 0; } break; case 2: if (nomem) { if (old_secure_tcp < 2) to_change = 1; } else { if (old_secure_tcp >= 2) to_change = 0; sysctl_ip_vs_secure_tcp = 1; } break; case 3: if (old_secure_tcp < 2) to_change = 1; break; } old_secure_tcp = sysctl_ip_vs_secure_tcp; if (to_change >= 0) ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1); write_unlock(&__ip_vs_securetcp_lock); local_bh_enable(); } /* * Timer for checking the defense */ #define DEFENSE_TIMER_PERIOD 1*HZ static void defense_work_handler(struct work_struct *work); static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); static void defense_work_handler(struct work_struct *work) { update_defense_level(); if (atomic_read(&ip_vs_dropentry)) ip_vs_random_dropentry(); schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); } int ip_vs_use_count_inc(void) { return try_module_get(THIS_MODULE); } void ip_vs_use_count_dec(void) { module_put(THIS_MODULE); } /* * Hash table: for virtual service lookups */ #define IP_VS_SVC_TAB_BITS 8 #define IP_VS_SVC_TAB_SIZE (1 << IP_VS_SVC_TAB_BITS) #define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1) /* the service table hashed by <protocol, addr, port> */ static struct list_head
ip_vs_svc_table[IP_VS_SVC_TAB_SIZE]; /* the service table hashed by fwmark */ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; /* * Hash table: for real service lookups */ #define IP_VS_RTAB_BITS 4 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE]; /* * Trash for destinations */ static LIST_HEAD(ip_vs_dest_trash); /* * FTP & NULL virtual service counters */ static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0); static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0); /* * Returns hash value for virtual service */ static __inline__ unsigned ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr, __be16 port) { register unsigned porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth) & IP_VS_SVC_TAB_MASK; } /* * Returns hash value of fwmark for virtual service lookup */ static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark) { return fwmark & IP_VS_SVC_TAB_MASK; } /* * Hashes a service in the ip_vs_svc_table by <proto,addr,port> * or in the ip_vs_svc_fwm_table by fwmark. * Should be called with locked tables. */ static int ip_vs_svc_hash(struct ip_vs_service *svc) { unsigned hash; if (svc->flags & IP_VS_SVC_F_HASHED) { pr_err("%s(): request for already hashed, called from %pF\n", __func__, __builtin_return_address(0)); return 0; } if (svc->fwmark == 0) { /* * Hash it by <protocol,addr,port> in ip_vs_svc_table */ hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr, svc->port); list_add(&svc->s_list, &ip_vs_svc_table[hash]); } else { /* * Hash it by fwmark in ip_vs_svc_fwm_table */ hash = ip_vs_svc_fwm_hashkey(svc->fwmark); list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]); } svc->flags |= IP_VS_SVC_F_HASHED; /* increase its refcnt because it is referenced by the svc table */ atomic_inc(&svc->refcnt); return 1; } /* * Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table. * Should be called with locked tables. */ static int ip_vs_svc_unhash(struct ip_vs_service *svc) { if (!(svc->flags & IP_VS_SVC_F_HASHED)) { pr_err("%s(): request for unhash flagged, called from %pF\n", __func__, __builtin_return_address(0)); return 0; } if (svc->fwmark == 0) { /* Remove it from the ip_vs_svc_table table */ list_del(&svc->s_list); } else { /* Remove it from the ip_vs_svc_fwm_table table */ list_del(&svc->f_list); } svc->flags &= ~IP_VS_SVC_F_HASHED; atomic_dec(&svc->refcnt); return 1; } /* * Get service by {proto,addr,port} in the service table. */ static inline struct ip_vs_service * __ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { unsigned hash; struct ip_vs_service *svc; /* Check for "full" addressed entries */ hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport); list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ if ((svc->af == af) && ip_vs_addr_equal(af, &svc->addr, vaddr) && (svc->port == vport) && (svc->protocol == protocol)) { /* HIT */ atomic_inc(&svc->usecnt); return svc; } } return NULL; } /* * Get service by {fwmark} in the service table. 
*/ static inline struct ip_vs_service * __ip_vs_svc_fwm_get(int af, __u32 fwmark) { unsigned hash; struct ip_vs_service *svc; /* Check for fwmark addressed entries */ hash = ip_vs_svc_fwm_hashkey(fwmark); list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { if (svc->fwmark == fwmark && svc->af == af) { /* HIT */ atomic_inc(&svc->usecnt); return svc; } } return NULL; } struct ip_vs_service * ip_vs_service_get(int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { struct ip_vs_service *svc; read_lock(&__ip_vs_svc_lock); /* * Check the table hashed by fwmark first */ if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark))) goto out; /* * Check the table hashed by <protocol,addr,port> * for "full" addressed entries */ svc = __ip_vs_service_get(af, protocol, vaddr, vport); if (svc == NULL && protocol == IPPROTO_TCP && atomic_read(&ip_vs_ftpsvc_counter) && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) { /* * Check if ftp service entry exists, the packet * might belong to FTP data connections. */ svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT); } if (svc == NULL && atomic_read(&ip_vs_nullsvc_counter)) { /* * Check if the catch-all port (port zero) exists */ svc = __ip_vs_service_get(af, protocol, vaddr, 0); } out: read_unlock(&__ip_vs_svc_lock); IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n", fwmark, ip_vs_proto_name(protocol), IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), svc ? "hit" : "not hit"); return svc; } static inline void __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) { atomic_inc(&svc->refcnt); dest->svc = svc; } static inline void __ip_vs_unbind_svc(struct ip_vs_dest *dest) { struct ip_vs_service *svc = dest->svc; dest->svc = NULL; if (atomic_dec_and_test(&svc->refcnt)) kfree(svc); } /* * Returns hash value for real service */ static inline unsigned ip_vs_rs_hashkey(int af, const union nf_inet_addr *addr, __be16 port) { register unsigned porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth) & IP_VS_RTAB_MASK; } /* * Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>. * should be called with locked tables. */ static int ip_vs_rs_hash(struct ip_vs_dest *dest) { unsigned hash; if (!list_empty(&dest->d_list)) { return 0; } /* * Hash by proto,addr,port, * which are the parameters of the real service. */ hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); list_add(&dest->d_list, &ip_vs_rtable[hash]); return 1; } /* * UNhashes ip_vs_dest from ip_vs_rtable. * should be called with locked tables. */ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) { /* * Remove it from the ip_vs_rtable table. */ if (!list_empty(&dest->d_list)) { list_del(&dest->d_list); INIT_LIST_HEAD(&dest->d_list); } return 1; } /* * Lookup real service by <proto,addr,port> in the real service table. 
*/ struct ip_vs_dest * ip_vs_lookup_real_service(int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport) { unsigned hash; struct ip_vs_dest *dest; /* * Check for "full" addressed entries * Return the first found entry */ hash = ip_vs_rs_hashkey(af, daddr, dport); read_lock(&__ip_vs_rs_lock); list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { if ((dest->af == af) && ip_vs_addr_equal(af, &dest->addr, daddr) && (dest->port == dport) && ((dest->protocol == protocol) || dest->vfwmark)) { /* HIT */ read_unlock(&__ip_vs_rs_lock); return dest; } } read_unlock(&__ip_vs_rs_lock); return NULL; } /* * Lookup destination by {addr,port} in the given service */ static struct ip_vs_dest * ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest; /* * Find the destination for the given service */ list_for_each_entry(dest, &svc->destinations, n_list) { if ((dest->af == svc->af) && ip_vs_addr_equal(svc->af, &dest->addr, daddr) && (dest->port == dport)) { /* HIT */ return dest; } } return NULL; } /* * Find destination by {daddr,dport,vaddr,protocol} * Created to be used in ip_vs_process_message() in * the backup synchronization daemon. It finds the * destination to be bound to the received connection * on the backup. * * ip_vs_lookup_real_service() looked promising, but * does not seem to work as expected. */ struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol) { struct ip_vs_dest *dest; struct ip_vs_service *svc; svc = ip_vs_service_get(af, 0, protocol, vaddr, vport); if (!svc) return NULL; dest = ip_vs_lookup_dest(svc, daddr, dport); if (dest) atomic_inc(&dest->refcnt); ip_vs_service_put(svc); return dest; } /* * Lookup dest by {svc,addr,port} in the destination trash. * The destination trash is used to hold the destinations that are removed * from the service table but are still referenced by some conn entries. * The reason to add the destination trash is that when the dest is temporarily * down (either by administrator or by monitor program), the dest can be * picked back from the trash, the remaining connections to the dest can * continue, and the counting information of the dest is also useful for * scheduling.
*/ static struct ip_vs_dest * ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest, *nxt; /* * Find the destination in trash */ list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " "dest->refcnt=%d\n", dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); if (dest->af == svc->af && ip_vs_addr_equal(svc->af, &dest->addr, daddr) && dest->port == dport && dest->vfwmark == svc->fwmark && dest->protocol == svc->protocol && (svc->fwmark || (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && dest->vport == svc->port))) { /* HIT */ return dest; } /* * Try to purge the destination from trash if not referenced */ if (atomic_read(&dest->refcnt) == 1) { IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u " "from trash\n", dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); list_del(&dest->n_list); ip_vs_dst_reset(dest); __ip_vs_unbind_svc(dest); kfree(dest); } } return NULL; } /* * Clean up all the destinations in the trash * Called by the ip_vs_control_cleanup() * * When the ip_vs_control_cleanup is activated by ipvs module exit, * the service tables must have been flushed and all the connections * have expired, and the refcnt of each destination in the trash must * be 1, so we simply release them here. */ static void ip_vs_trash_cleanup(void) { struct ip_vs_dest *dest, *nxt; list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { list_del(&dest->n_list); ip_vs_dst_reset(dest); __ip_vs_unbind_svc(dest); kfree(dest); } } static void ip_vs_zero_stats(struct ip_vs_stats *stats) { spin_lock_bh(&stats->lock); memset(&stats->ustats, 0, sizeof(stats->ustats)); ip_vs_zero_estimator(stats); spin_unlock_bh(&stats->lock); } /* * Update a destination in the given service */ static void __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest) { int conn_flags; /* set the weight and the flags */ atomic_set(&dest->weight, udest->weight); conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; /* check if local node and update the flags */ #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) { if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) { conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | IP_VS_CONN_F_LOCALNODE; } } else #endif if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) { conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | IP_VS_CONN_F_LOCALNODE; } /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { conn_flags |= IP_VS_CONN_F_NOOUTPUT; } else { /* * Put the real service in ip_vs_rtable if not present. * For now only for NAT!
*/ write_lock_bh(&__ip_vs_rs_lock); ip_vs_rs_hash(dest); write_unlock_bh(&__ip_vs_rs_lock); } atomic_set(&dest->conn_flags, conn_flags); /* bind the service */ if (!dest->svc) { __ip_vs_bind_svc(dest, svc); } else { if (dest->svc != svc) { __ip_vs_unbind_svc(dest); ip_vs_zero_stats(&dest->stats); __ip_vs_bind_svc(dest, svc); } } /* set the dest status flags */ dest->flags |= IP_VS_DEST_F_AVAILABLE; if (udest->u_threshold == 0 || udest->u_threshold > dest->u_threshold) dest->flags &= ~IP_VS_DEST_F_OVERLOAD; dest->u_threshold = udest->u_threshold; dest->l_threshold = udest->l_threshold; } /* * Create a destination for the given service */ static int ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, struct ip_vs_dest **dest_p) { struct ip_vs_dest *dest; unsigned atype; EnterFunction(2); #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) { atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && !__ip_vs_addr_is_local_v6(&udest->addr.in6)) return -EINVAL; } else #endif { atype = inet_addr_type(&init_net, udest->addr.ip); if (atype != RTN_LOCAL && atype != RTN_UNICAST) return -EINVAL; } dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); if (dest == NULL) { pr_err("%s(): no memory.\n", __func__); return -ENOMEM; } dest->af = svc->af; dest->protocol = svc->protocol; dest->vaddr = svc->addr; dest->vport = svc->port; dest->vfwmark = svc->fwmark; ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr); dest->port = udest->port; atomic_set(&dest->activeconns, 0); atomic_set(&dest->inactconns, 0); atomic_set(&dest->persistconns, 0); atomic_set(&dest->refcnt, 0); INIT_LIST_HEAD(&dest->d_list); spin_lock_init(&dest->dst_lock); spin_lock_init(&dest->stats.lock); __ip_vs_update_dest(svc, dest, udest); ip_vs_new_estimator(&dest->stats); *dest_p = dest; LeaveFunction(2); return 0; } /* * Add a destination into an existing service */ static int ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; union nf_inet_addr daddr; __be16 dport = udest->port; int ret; EnterFunction(2); if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; } if (udest->l_threshold > udest->u_threshold) { pr_err("%s(): lower threshold is higher than upper threshold\n", __func__); return -ERANGE; } ip_vs_addr_copy(svc->af, &daddr, &udest->addr); /* * Check if the dest already exists in the list */ dest = ip_vs_lookup_dest(svc, &daddr, dport); if (dest != NULL) { IP_VS_DBG(1, "%s(): dest already exists\n", __func__); return -EEXIST; } /* * Check if the dest already exists in the trash and * is from the same service */ dest = ip_vs_trash_get_dest(svc, &daddr, dport); if (dest != NULL) { IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " "dest->refcnt=%d, service %u/%s:%u\n", IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport), atomic_read(&dest->refcnt), dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->vaddr), ntohs(dest->vport)); __ip_vs_update_dest(svc, dest, udest); /* * Get the destination from the trash */ list_del(&dest->n_list); ip_vs_new_estimator(&dest->stats); write_lock_bh(&__ip_vs_svc_lock); /* * Wait until all other svc users go away. 
*/ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); list_add(&dest->n_list, &svc->destinations); svc->num_dests++; /* call the update_service function of its scheduler */ if (svc->scheduler->update_service) svc->scheduler->update_service(svc); write_unlock_bh(&__ip_vs_svc_lock); return 0; } /* * Allocate and initialize the dest structure */ ret = ip_vs_new_dest(svc, udest, &dest); if (ret) { return ret; } /* * Add the dest entry into the list */ atomic_inc(&dest->refcnt); write_lock_bh(&__ip_vs_svc_lock); /* * Wait until all other svc users go away. */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); list_add(&dest->n_list, &svc->destinations); svc->num_dests++; /* call the update_service function of its scheduler */ if (svc->scheduler->update_service) svc->scheduler->update_service(svc); write_unlock_bh(&__ip_vs_svc_lock); LeaveFunction(2); return 0; } /* * Edit a destination in the given service */ static int ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; union nf_inet_addr daddr; __be16 dport = udest->port; EnterFunction(2); if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; } if (udest->l_threshold > udest->u_threshold) { pr_err("%s(): lower threshold is higher than upper threshold\n", __func__); return -ERANGE; } ip_vs_addr_copy(svc->af, &daddr, &udest->addr); /* * Lookup the destination list */ dest = ip_vs_lookup_dest(svc, &daddr, dport); if (dest == NULL) { IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__); return -ENOENT; } __ip_vs_update_dest(svc, dest, udest); write_lock_bh(&__ip_vs_svc_lock); /* Wait until all other svc users go away */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); /* call the update_service, because server weight may be changed */ if (svc->scheduler->update_service) svc->scheduler->update_service(svc); write_unlock_bh(&__ip_vs_svc_lock); LeaveFunction(2); return 0; } /* * Delete a destination (must be already unlinked from the service) */ static void __ip_vs_del_dest(struct ip_vs_dest *dest) { ip_vs_kill_estimator(&dest->stats); /* * Remove it from the d-linked list with the real services. */ write_lock_bh(&__ip_vs_rs_lock); ip_vs_rs_unhash(dest); write_unlock_bh(&__ip_vs_rs_lock); /* * Decrease the refcnt of the dest, and free the dest * if nobody refers to it (refcnt=0). Otherwise, throw * the destination into the trash. */ if (atomic_dec_and_test(&dest->refcnt)) { ip_vs_dst_reset(dest); /* simply decrease svc->refcnt here, let the caller check and release the service if nobody refers to it. Only user context can release destination and service, and only one user context can update virtual service at a time, so the operation here is OK */ atomic_dec(&dest->svc->refcnt); kfree(dest); } else { IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, " "dest->refcnt=%d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); list_add(&dest->n_list, &ip_vs_dest_trash); atomic_inc(&dest->refcnt); } } /* * Unlink a destination from the given service */ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, int svcupd) { dest->flags &= ~IP_VS_DEST_F_AVAILABLE; /* * Remove it from the d-linked destination list. 
*/ list_del(&dest->n_list); svc->num_dests--; /* * Call the update_service function of its scheduler */ if (svcupd && svc->scheduler->update_service) svc->scheduler->update_service(svc); } /* * Delete a destination server in the given service */ static int ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; __be16 dport = udest->port; EnterFunction(2); dest = ip_vs_lookup_dest(svc, &udest->addr, dport); if (dest == NULL) { IP_VS_DBG(1, "%s(): destination not found!\n", __func__); return -ENOENT; } write_lock_bh(&__ip_vs_svc_lock); /* * Wait until all other svc users go away. */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); /* * Unlink dest from the service */ __ip_vs_unlink_dest(svc, dest, 1); write_unlock_bh(&__ip_vs_svc_lock); /* * Delete the destination */ __ip_vs_del_dest(dest); LeaveFunction(2); return 0; } /* * Add a service into the service hash table */ static int ip_vs_add_service(struct ip_vs_service_user_kern *u, struct ip_vs_service **svc_p) { int ret = 0; struct ip_vs_scheduler *sched = NULL; struct ip_vs_service *svc = NULL; /* increase the module use count */ ip_vs_use_count_inc(); /* Lookup the scheduler by 'u->sched_name' */ sched = ip_vs_scheduler_get(u->sched_name); if (sched == NULL) { pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); ret = -ENOENT; goto out_mod_dec; } #ifdef CONFIG_IP_VS_IPV6 if (u->af == AF_INET6 && (u->netmask < 1 || u->netmask > 128)) { ret = -EINVAL; goto out_err; } #endif svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); if (svc == NULL) { IP_VS_DBG(1, "%s(): no memory\n", __func__); ret = -ENOMEM; goto out_err; } /* I'm the first user of the service */ atomic_set(&svc->usecnt, 1); atomic_set(&svc->refcnt, 0); svc->af = u->af; svc->protocol = u->protocol; ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); svc->port = u->port; svc->fwmark = u->fwmark; svc->flags = u->flags; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; INIT_LIST_HEAD(&svc->destinations); rwlock_init(&svc->sched_lock); spin_lock_init(&svc->stats.lock); /* Bind the scheduler */ ret = ip_vs_bind_scheduler(svc, sched); if (ret) goto out_err; sched = NULL; /* Update the virtual service counters */ if (svc->port == FTPPORT) atomic_inc(&ip_vs_ftpsvc_counter); else if (svc->port == 0) atomic_inc(&ip_vs_nullsvc_counter); ip_vs_new_estimator(&svc->stats); /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ip_vs_num_services++; /* Hash the service into the service table */ write_lock_bh(&__ip_vs_svc_lock); ip_vs_svc_hash(svc); write_unlock_bh(&__ip_vs_svc_lock); *svc_p = svc; return 0; out_err: if (svc != NULL) { if (svc->scheduler) ip_vs_unbind_scheduler(svc); if (svc->inc) { local_bh_disable(); ip_vs_app_inc_put(svc->inc); local_bh_enable(); } kfree(svc); } ip_vs_scheduler_put(sched); out_mod_dec: /* decrease the module use count */ ip_vs_use_count_dec(); return ret; } /* * Edit a service and bind it with a new scheduler */ static int ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) { struct ip_vs_scheduler *sched, *old_sched; int ret = 0; /* * Lookup the scheduler, by 'u->sched_name' */ sched = ip_vs_scheduler_get(u->sched_name); if (sched == NULL) { pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); return -ENOENT; } old_sched = sched; #ifdef CONFIG_IP_VS_IPV6 if (u->af == AF_INET6 && (u->netmask < 1 || u->netmask > 128)) { ret = -EINVAL; goto out; } #endif write_lock_bh(&__ip_vs_svc_lock); /* * Wait until all other svc 
users go away. */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); /* * Set the flags and timeout value */ svc->flags = u->flags | IP_VS_SVC_F_HASHED; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; old_sched = svc->scheduler; if (sched != old_sched) { /* * Unbind the old scheduler */ if ((ret = ip_vs_unbind_scheduler(svc))) { old_sched = sched; goto out_unlock; } /* * Bind the new scheduler */ if ((ret = ip_vs_bind_scheduler(svc, sched))) { /* * If ip_vs_bind_scheduler fails, restore the old * scheduler. * The main reason of failure is out of memory. * * The question is if the old scheduler can be * restored all the time. TODO: if it cannot be * restored some time, we must delete the service, * otherwise the system may crash. */ ip_vs_bind_scheduler(svc, old_sched); old_sched = sched; goto out_unlock; } } out_unlock: write_unlock_bh(&__ip_vs_svc_lock); #ifdef CONFIG_IP_VS_IPV6 out: #endif if (old_sched) ip_vs_scheduler_put(old_sched); return ret; } /* * Delete a service from the service list * - The service must be unlinked, unlocked and not referenced! * - We are called under _bh lock */ static void __ip_vs_del_service(struct ip_vs_service *svc) { struct ip_vs_dest *dest, *nxt; struct ip_vs_scheduler *old_sched; /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ip_vs_num_services--; ip_vs_kill_estimator(&svc->stats); /* Unbind scheduler */ old_sched = svc->scheduler; ip_vs_unbind_scheduler(svc); if (old_sched) ip_vs_scheduler_put(old_sched); /* Unbind app inc */ if (svc->inc) { ip_vs_app_inc_put(svc->inc); svc->inc = NULL; } /* * Unlink the whole destination list */ list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) { __ip_vs_unlink_dest(svc, dest, 0); __ip_vs_del_dest(dest); } /* * Update the virtual service counters */ if (svc->port == FTPPORT) atomic_dec(&ip_vs_ftpsvc_counter); else if (svc->port == 0) atomic_dec(&ip_vs_nullsvc_counter); /* * Free the service if nobody refers to it */ if (atomic_read(&svc->refcnt) == 0) kfree(svc); /* decrease the module use count */ ip_vs_use_count_dec(); } /* * Delete a service from the service list */ static int ip_vs_del_service(struct ip_vs_service *svc) { if (svc == NULL) return -EEXIST; /* * Unhash it from the service table */ write_lock_bh(&__ip_vs_svc_lock); ip_vs_svc_unhash(svc); /* * Wait until all the svc users go away. */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); __ip_vs_del_service(svc); write_unlock_bh(&__ip_vs_svc_lock); return 0; } /* * Flush all the virtual services */ static int ip_vs_flush(void) { int idx; struct ip_vs_service *svc, *nxt; /* * Flush the service table hashed by <protocol,addr,port> */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) { write_lock_bh(&__ip_vs_svc_lock); ip_vs_svc_unhash(svc); /* * Wait until all the svc users go away. */ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0); __ip_vs_del_service(svc); write_unlock_bh(&__ip_vs_svc_lock); } } /* * Flush the service table hashed by fwmark */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry_safe(svc, nxt, &ip_vs_svc_fwm_table[idx], f_list) { write_lock_bh(&__ip_vs_svc_lock); ip_vs_svc_unhash(svc); /* * Wait until all the svc users go away. 
*/ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0); __ip_vs_del_service(svc); write_unlock_bh(&__ip_vs_svc_lock); } } return 0; } /* * Zero counters in a service or all services */ static int ip_vs_zero_service(struct ip_vs_service *svc) { struct ip_vs_dest *dest; write_lock_bh(&__ip_vs_svc_lock); list_for_each_entry(dest, &svc->destinations, n_list) { ip_vs_zero_stats(&dest->stats); } ip_vs_zero_stats(&svc->stats); write_unlock_bh(&__ip_vs_svc_lock); return 0; } static int ip_vs_zero_all(void) { int idx; struct ip_vs_service *svc; for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { ip_vs_zero_service(svc); } } for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { ip_vs_zero_service(svc); } } ip_vs_zero_stats(&ip_vs_stats); return 0; } static int proc_do_defense_mode(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; int val = *valp; int rc; rc = proc_dointvec(table, write, buffer, lenp, ppos); if (write && (*valp != val)) { if ((*valp < 0) || (*valp > 3)) { /* Restore the correct value */ *valp = val; } else { update_defense_level(); } } return rc; } static int proc_do_sync_threshold(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; int val[2]; int rc; /* backup the value first */ memcpy(val, valp, sizeof(val)); rc = proc_dointvec(table, write, buffer, lenp, ppos); if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) { /* Restore the correct value */ memcpy(valp, val, sizeof(val)); } return rc; } /* * IPVS sysctl table (under the /proc/sys/net/ipv4/vs/) */ static struct ctl_table vs_vars[] = { { .procname = "amemthresh", .data = &sysctl_ip_vs_amemthresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", .data = &sysctl_ip_vs_debug_level, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "am_droprate", .data = &sysctl_ip_vs_am_droprate, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "drop_entry", .data = &sysctl_ip_vs_drop_entry, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, }, { .procname = "drop_packet", .data = &sysctl_ip_vs_drop_packet, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, }, { .procname = "secure_tcp", .data = &sysctl_ip_vs_secure_tcp, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_defense_mode, }, #if 0 { .procname = "timeout_established", .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_synsent", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_synrecv", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_finwait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_timewait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_close", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE], 
.maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_closewait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_lastack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_listen", .data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_synack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_udp", .data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "timeout_icmp", .data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP], .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif { .procname = "cache_bypass", .data = &sysctl_ip_vs_cache_bypass, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "expire_nodest_conn", .data = &sysctl_ip_vs_expire_nodest_conn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "expire_quiescent_template", .data = &sysctl_ip_vs_expire_quiescent_template, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sync_threshold", .data = &sysctl_ip_vs_sync_threshold, .maxlen = sizeof(sysctl_ip_vs_sync_threshold), .mode = 0644, .proc_handler = proc_do_sync_threshold, }, { .procname = "nat_icmp_send", .data = &sysctl_ip_vs_nat_icmp_send, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; const struct ctl_path net_vs_ctl_path[] = { { .procname = "net", }, { .procname = "ipv4", }, { .procname = "vs", }, { } }; EXPORT_SYMBOL_GPL(net_vs_ctl_path); static struct ctl_table_header * sysctl_header; #ifdef CONFIG_PROC_FS struct ip_vs_iter { struct list_head *table; int bucket; }; /* * Write the contents of the VS rule table to a PROCfs file. * (It is kept just for backward compatibility) */ static inline const char *ip_vs_fwd_name(unsigned flags) { switch (flags & IP_VS_CONN_F_FWD_MASK) { case IP_VS_CONN_F_LOCALNODE: return "Local"; case IP_VS_CONN_F_TUNNEL: return "Tunnel"; case IP_VS_CONN_F_DROUTE: return "Route"; default: return "Masq"; } } /* Get the Nth entry in the two lists */ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) { struct ip_vs_iter *iter = seq->private; int idx; struct ip_vs_service *svc; /* look in hash by protocol */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { if (pos-- == 0){ iter->table = ip_vs_svc_table; iter->bucket = idx; return svc; } } } /* keep looking in fwmark */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { if (pos-- == 0) { iter->table = ip_vs_svc_fwm_table; iter->bucket = idx; return svc; } } } return NULL; } static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(__ip_vs_svc_lock) { read_lock_bh(&__ip_vs_svc_lock); return *pos ? 
ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct list_head *e; struct ip_vs_iter *iter; struct ip_vs_service *svc; ++*pos; if (v == SEQ_START_TOKEN) return ip_vs_info_array(seq,0); svc = v; iter = seq->private; if (iter->table == ip_vs_svc_table) { /* next service in table hashed by protocol */ if ((e = svc->s_list.next) != &ip_vs_svc_table[iter->bucket]) return list_entry(e, struct ip_vs_service, s_list); while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { list_for_each_entry(svc,&ip_vs_svc_table[iter->bucket], s_list) { return svc; } } iter->table = ip_vs_svc_fwm_table; iter->bucket = -1; goto scan_fwmark; } /* next service in hashed by fwmark */ if ((e = svc->f_list.next) != &ip_vs_svc_fwm_table[iter->bucket]) return list_entry(e, struct ip_vs_service, f_list); scan_fwmark: while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[iter->bucket], f_list) return svc; } return NULL; } static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) __releases(__ip_vs_svc_lock) { read_unlock_bh(&__ip_vs_svc_lock); } static int ip_vs_info_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, "IP Virtual Server version %d.%d.%d (size=%d)\n", NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); seq_puts(seq, "Prot LocalAddress:Port Scheduler Flags\n"); seq_puts(seq, " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"); } else { const struct ip_vs_service *svc = v; const struct ip_vs_iter *iter = seq->private; const struct ip_vs_dest *dest; if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) seq_printf(seq, "%s [%pI6]:%04X %s ", ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), svc->scheduler->name); else #endif seq_printf(seq, "%s %08X:%04X %s ", ip_vs_proto_name(svc->protocol), ntohl(svc->addr.ip), ntohs(svc->port), svc->scheduler->name); } else { seq_printf(seq, "FWM %08X %s ", svc->fwmark, svc->scheduler->name); } if (svc->flags & IP_VS_SVC_F_PERSISTENT) seq_printf(seq, "persistent %d %08X\n", svc->timeout, ntohl(svc->netmask)); else seq_putc(seq, '\n'); list_for_each_entry(dest, &svc->destinations, n_list) { #ifdef CONFIG_IP_VS_IPV6 if (dest->af == AF_INET6) seq_printf(seq, " -> [%pI6]:%04X" " %-7s %-6d %-10d %-10d\n", &dest->addr.in6, ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); else #endif seq_printf(seq, " -> %08X:%04X " "%-7s %-6d %-10d %-10d\n", ntohl(dest->addr.ip), ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); } } return 0; } static const struct seq_operations ip_vs_info_seq_ops = { .start = ip_vs_info_seq_start, .next = ip_vs_info_seq_next, .stop = ip_vs_info_seq_stop, .show = ip_vs_info_seq_show, }; static int ip_vs_info_open(struct inode *inode, struct file *file) { return seq_open_private(file, &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)); } static const struct file_operations ip_vs_info_fops = { .owner = THIS_MODULE, .open = ip_vs_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif struct ip_vs_stats ip_vs_stats = { .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock), }; #ifdef CONFIG_PROC_FS static int ip_vs_stats_show(struct seq_file *seq, void *v) { /* 01234567 01234567 01234567 0123456701234567 
0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); seq_printf(seq, " Conns Packets Packets Bytes Bytes\n"); spin_lock_bh(&ip_vs_stats.lock); seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns, ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts, (unsigned long long) ip_vs_stats.ustats.inbytes, (unsigned long long) ip_vs_stats.ustats.outbytes); /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); seq_printf(seq,"%8X %8X %8X %16X %16X\n", ip_vs_stats.ustats.cps, ip_vs_stats.ustats.inpps, ip_vs_stats.ustats.outpps, ip_vs_stats.ustats.inbps, ip_vs_stats.ustats.outbps); spin_unlock_bh(&ip_vs_stats.lock); return 0; } static int ip_vs_stats_seq_open(struct inode *inode, struct file *file) { return single_open(file, ip_vs_stats_show, NULL); } static const struct file_operations ip_vs_stats_fops = { .owner = THIS_MODULE, .open = ip_vs_stats_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* * Set timeout values for tcp tcpfin udp in the timeout_table. */ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u) { IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n", u->tcp_timeout, u->tcp_fin_timeout, u->udp_timeout); #ifdef CONFIG_IP_VS_PROTO_TCP if (u->tcp_timeout) { ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] = u->tcp_timeout * HZ; } if (u->tcp_fin_timeout) { ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] = u->tcp_fin_timeout * HZ; } #endif #ifdef CONFIG_IP_VS_PROTO_UDP if (u->udp_timeout) { ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] = u->udp_timeout * HZ; } #endif return 0; } #define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) #define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user)) #define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \ sizeof(struct ip_vs_dest_user)) #define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) #define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user)) #define MAX_ARG_LEN SVCDEST_ARG_LEN static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { [SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0, [SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN, [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, }; static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, struct ip_vs_service_user *usvc_compat) { usvc->af = AF_INET; usvc->protocol = usvc_compat->protocol; usvc->addr.ip = usvc_compat->addr; usvc->port = usvc_compat->port; usvc->fwmark = usvc_compat->fwmark; /* Deep copy of sched_name is not needed here */ usvc->sched_name = usvc_compat->sched_name; usvc->flags = usvc_compat->flags; usvc->timeout = usvc_compat->timeout; usvc->netmask = usvc_compat->netmask; } static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, struct ip_vs_dest_user *udest_compat) { udest->addr.ip = udest_compat->addr; udest->port = udest_compat->port; udest->conn_flags = udest_compat->conn_flags; udest->weight = udest_compat->weight; udest->u_threshold = udest_compat->u_threshold; udest->l_threshold = udest_compat->l_threshold; } static int 
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; unsigned char arg[MAX_ARG_LEN]; struct ip_vs_service_user *usvc_compat; struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; struct ip_vs_dest_user *udest_compat; struct ip_vs_dest_user_kern udest; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) return -EINVAL; if (len < 0 || len > MAX_ARG_LEN) return -EINVAL; if (len != set_arglen[SET_CMDID(cmd)]) { pr_err("set_ctl: len %u != %u\n", len, set_arglen[SET_CMDID(cmd)]); return -EINVAL; } if (copy_from_user(arg, user, len) != 0) return -EFAULT; /* increase the module use count */ ip_vs_use_count_inc(); if (mutex_lock_interruptible(&__ip_vs_mutex)) { ret = -ERESTARTSYS; goto out_dec; } if (cmd == IP_VS_SO_SET_FLUSH) { /* Flush the virtual service */ ret = ip_vs_flush(); goto out_unlock; } else if (cmd == IP_VS_SO_SET_TIMEOUT) { /* Set timeout values for (tcp tcpfin udp) */ ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg); goto out_unlock; } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid); goto out_unlock; } else if (cmd == IP_VS_SO_SET_STOPDAEMON) { struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; ret = stop_sync_thread(dm->state); goto out_unlock; } usvc_compat = (struct ip_vs_service_user *)arg; udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1); /* We only use the new structs internally, so copy userspace compat * structs to extended internal versions */ ip_vs_copy_usvc_compat(&usvc, usvc_compat); ip_vs_copy_udest_compat(&udest, udest_compat); if (cmd == IP_VS_SO_SET_ZERO) { /* if no service address is set, zero counters in all */ if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) { ret = ip_vs_zero_all(); goto out_unlock; } } /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", usvc.protocol, &usvc.addr.ip, ntohs(usvc.port), usvc.sched_name); ret = -EFAULT; goto out_unlock; } /* Lookup the exact service by <protocol, addr, port> or fwmark */ if (usvc.fwmark == 0) svc = __ip_vs_service_get(usvc.af, usvc.protocol, &usvc.addr, usvc.port); else svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); if (cmd != IP_VS_SO_SET_ADD && (svc == NULL || svc->protocol != usvc.protocol)) { ret = -ESRCH; goto out_unlock; } switch (cmd) { case IP_VS_SO_SET_ADD: if (svc != NULL) ret = -EEXIST; else ret = ip_vs_add_service(&usvc, &svc); break; case IP_VS_SO_SET_EDIT: ret = ip_vs_edit_service(svc, &usvc); break; case IP_VS_SO_SET_DEL: ret = ip_vs_del_service(svc); if (!ret) goto out_unlock; break; case IP_VS_SO_SET_ZERO: ret = ip_vs_zero_service(svc); break; case IP_VS_SO_SET_ADDDEST: ret = ip_vs_add_dest(svc, &udest); break; case IP_VS_SO_SET_EDITDEST: ret = ip_vs_edit_dest(svc, &udest); break; case IP_VS_SO_SET_DELDEST: ret = ip_vs_del_dest(svc, &udest); break; default: ret = -EINVAL; } if (svc) ip_vs_service_put(svc); out_unlock: mutex_unlock(&__ip_vs_mutex); out_dec: /* decrease the module use count */ ip_vs_use_count_dec(); return ret; } static void ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) { spin_lock_bh(&src->lock); memcpy(dst, &src->ustats, sizeof(*dst)); spin_unlock_bh(&src->lock); } static void ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) { dst->protocol = 
src->protocol; dst->addr = src->addr.ip; dst->port = src->port; dst->fwmark = src->fwmark; strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; dst->num_dests = src->num_dests; ip_vs_copy_stats(&dst->stats, &src->stats); } static inline int __ip_vs_get_service_entries(const struct ip_vs_get_services *get, struct ip_vs_get_services __user *uptr) { int idx, count=0; struct ip_vs_service *svc; struct ip_vs_service_entry entry; int ret = 0; for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { /* Only expose IPv4 entries to old interface */ if (svc->af != AF_INET) continue; if (count >= get->num_services) goto out; memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; goto out; } count++; } } for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { /* Only expose IPv4 entries to old interface */ if (svc->af != AF_INET) continue; if (count >= get->num_services) goto out; memset(&entry, 0, sizeof(entry)); ip_vs_copy_service(&entry, svc); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; goto out; } count++; } } out: return ret; } static inline int __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, struct ip_vs_get_dests __user *uptr) { struct ip_vs_service *svc; union nf_inet_addr addr = { .ip = get->addr }; int ret = 0; if (get->fwmark) svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark); else svc = __ip_vs_service_get(AF_INET, get->protocol, &addr, get->port); if (svc) { int count = 0; struct ip_vs_dest *dest; struct ip_vs_dest_entry entry; list_for_each_entry(dest, &svc->destinations, n_list) { if (count >= get->num_dests) break; entry.addr = dest->addr.ip; entry.port = dest->port; entry.conn_flags = atomic_read(&dest->conn_flags); entry.weight = atomic_read(&dest->weight); entry.u_threshold = dest->u_threshold; entry.l_threshold = dest->l_threshold; entry.activeconns = atomic_read(&dest->activeconns); entry.inactconns = atomic_read(&dest->inactconns); entry.persistconns = atomic_read(&dest->persistconns); ip_vs_copy_stats(&entry.stats, &dest->stats); if (copy_to_user(&uptr->entrytable[count], &entry, sizeof(entry))) { ret = -EFAULT; break; } count++; } ip_vs_service_put(svc); } else ret = -ESRCH; return ret; } static inline void __ip_vs_get_timeouts(struct ip_vs_timeout_user *u) { #ifdef CONFIG_IP_VS_PROTO_TCP u->tcp_timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ; u->tcp_fin_timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ; #endif #ifdef CONFIG_IP_VS_PROTO_UDP u->udp_timeout = ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ; #endif } #define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) #define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo)) #define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services)) #define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry)) #define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests)) #define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) #define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2) static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = { [GET_CMDID(IP_VS_SO_GET_VERSION)] = 64, [GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN, [GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN, 
[GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN, [GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN, [GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN, [GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN, }; static int do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { unsigned char arg[128]; int ret = 0; unsigned int copylen; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) return -EINVAL; if (*len < get_arglen[GET_CMDID(cmd)]) { pr_err("get_ctl: len %u < %u\n", *len, get_arglen[GET_CMDID(cmd)]); return -EINVAL; } copylen = get_arglen[GET_CMDID(cmd)]; if (copylen > 128) return -EINVAL; if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; if (mutex_lock_interruptible(&__ip_vs_mutex)) return -ERESTARTSYS; switch (cmd) { case IP_VS_SO_GET_VERSION: { char buf[64]; sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); if (copy_to_user(user, buf, strlen(buf)+1) != 0) { ret = -EFAULT; goto out; } *len = strlen(buf)+1; } break; case IP_VS_SO_GET_INFO: { struct ip_vs_getinfo info; info.version = IP_VS_VERSION_CODE; info.size = IP_VS_CONN_TAB_SIZE; info.num_services = ip_vs_num_services; if (copy_to_user(user, &info, sizeof(info)) != 0) ret = -EFAULT; } break; case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; int size; get = (struct ip_vs_get_services *)arg; size = sizeof(*get) + sizeof(struct ip_vs_service_entry) * get->num_services; if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_service_entries(get, user); } break; case IP_VS_SO_GET_SERVICE: { struct ip_vs_service_entry *entry; struct ip_vs_service *svc; union nf_inet_addr addr; entry = (struct ip_vs_service_entry *)arg; addr.ip = entry->addr; if (entry->fwmark) svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark); else svc = __ip_vs_service_get(AF_INET, entry->protocol, &addr, entry->port); if (svc) { ip_vs_copy_service(entry, svc); if (copy_to_user(user, entry, sizeof(*entry)) != 0) ret = -EFAULT; ip_vs_service_put(svc); } else ret = -ESRCH; } break; case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; int size; get = (struct ip_vs_get_dests *)arg; size = sizeof(*get) + sizeof(struct ip_vs_dest_entry) * get->num_dests; if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_dest_entries(get, user); } break; case IP_VS_SO_GET_TIMEOUT: { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(&t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; } break; case IP_VS_SO_GET_DAEMON: { struct ip_vs_daemon_user d[2]; memset(&d, 0, sizeof(d)); if (ip_vs_sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ip_vs_master_syncid; } if (ip_vs_sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ip_vs_backup_syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) ret = -EFAULT; } break; default: ret = -EINVAL; } out: mutex_unlock(&__ip_vs_mutex); return ret; } static struct nf_sockopt_ops ip_vs_sockopts = { .pf = PF_INET, .set_optmin = IP_VS_BASE_CTL, .set_optmax = IP_VS_SO_SET_MAX+1, .set = do_ip_vs_set_ctl, .get_optmin = IP_VS_BASE_CTL, .get_optmax = IP_VS_SO_GET_MAX+1, .get = do_ip_vs_get_ctl, .owner = THIS_MODULE, }; /* * Generic Netlink interface */ /* IPVS genetlink family */ 
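/*
 * GENL_ID_GENERATE lets the Generic Netlink core pick a free family id at
 * registration time; userspace does not hard-code the id but resolves it at
 * runtime by querying the nlctrl controller for the IPVS_GENL_NAME family.
 */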
static struct genl_family ip_vs_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = IPVS_GENL_NAME, .version = IPVS_GENL_VERSION, .maxattr = IPVS_CMD_MAX, }; /* Policy used for first-level command attributes */ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = { [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED }, [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 }, [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 }, [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */ static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, .len = IP_VS_IFNAME_MAXLEN }, [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = { [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY, .len = sizeof(union nf_inet_addr) }, [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, .len = IP_VS_SCHEDNAME_MAXLEN }, [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, .len = sizeof(struct ip_vs_flags) }, [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED }, }; /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY, .len = sizeof(union nf_inet_addr) }, [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 }, [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED }, }; static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, struct ip_vs_stats *stats) { struct nlattr *nl_stats = nla_nest_start(skb, container_type); if (!nl_stats) return -EMSGSIZE; spin_lock_bh(&stats->lock); NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns); NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts); NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts); NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes); NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes); NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps); NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps); NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps); NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps); NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps); spin_unlock_bh(&stats->lock); nla_nest_end(skb, nl_stats); return 0; nla_put_failure: spin_unlock_bh(&stats->lock); nla_nest_cancel(skb, nl_stats); return -EMSGSIZE; } static int ip_vs_genl_fill_service(struct sk_buff *skb, struct ip_vs_service *svc) { struct nlattr *nl_service; struct ip_vs_flags flags = { .flags = svc->flags, 
.mask = ~0 }; nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); if (!nl_service) return -EMSGSIZE; NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); if (svc->fwmark) { NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); } else { NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); } NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) goto nla_put_failure; nla_nest_end(skb, nl_service); return 0; nla_put_failure: nla_nest_cancel(skb, nl_service); return -EMSGSIZE; } static int ip_vs_genl_dump_service(struct sk_buff *skb, struct ip_vs_service *svc, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_SERVICE); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_service(skb, svc) < 0) goto nla_put_failure; return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_services(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0, i; int start = cb->args[0]; struct ip_vs_service *svc; mutex_lock(&__ip_vs_mutex); for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { if (++idx <= start) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; goto nla_put_failure; } } } for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { if (++idx <= start) continue; if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { idx--; goto nla_put_failure; } } } nla_put_failure: mutex_unlock(&__ip_vs_mutex); cb->args[0] = idx; return skb->len; } static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, struct nlattr *nla, int full_entry) { struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1]; struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr; /* Parse mandatory identifying service fields first */ if (nla == NULL || nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy)) return -EINVAL; nla_af = attrs[IPVS_SVC_ATTR_AF]; nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL]; nla_addr = attrs[IPVS_SVC_ATTR_ADDR]; nla_port = attrs[IPVS_SVC_ATTR_PORT]; nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK]; if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) return -EINVAL; memset(usvc, 0, sizeof(*usvc)); usvc->af = nla_get_u16(nla_af); #ifdef CONFIG_IP_VS_IPV6 if (usvc->af != AF_INET && usvc->af != AF_INET6) #else if (usvc->af != AF_INET) #endif return -EAFNOSUPPORT; if (nla_fwmark) { usvc->protocol = IPPROTO_TCP; usvc->fwmark = nla_get_u32(nla_fwmark); } else { usvc->protocol = nla_get_u16(nla_protocol); nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr)); usvc->port = nla_get_u16(nla_port); usvc->fwmark = 0; } /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_sched, *nla_flags, *nla_timeout, *nla_netmask; struct ip_vs_flags flags; struct ip_vs_service *svc; nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME]; nla_flags = attrs[IPVS_SVC_ATTR_FLAGS]; nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT]; nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK]; if (!(nla_sched && nla_flags && nla_timeout && nla_netmask)) return 
-EINVAL; nla_memcpy(&flags, nla_flags, sizeof(flags)); /* prefill flags from service if it already exists */ if (usvc->fwmark) svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark); else svc = __ip_vs_service_get(usvc->af, usvc->protocol, &usvc->addr, usvc->port); if (svc) { usvc->flags = svc->flags; ip_vs_service_put(svc); } else usvc->flags = 0; /* set new flags from userland */ usvc->flags = (usvc->flags & ~flags.mask) | (flags.flags & flags.mask); usvc->sched_name = nla_data(nla_sched); usvc->timeout = nla_get_u32(nla_timeout); usvc->netmask = nla_get_u32(nla_netmask); } return 0; } static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla) { struct ip_vs_service_user_kern usvc; int ret; ret = ip_vs_genl_parse_service(&usvc, nla, 0); if (ret) return ERR_PTR(ret); if (usvc.fwmark) return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); else return __ip_vs_service_get(usvc.af, usvc.protocol, &usvc.addr, usvc.port); } static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) { struct nlattr *nl_dest; nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST); if (!nl_dest) return -EMSGSIZE; NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, atomic_read(&dest->activeconns)); NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, atomic_read(&dest->inactconns)); NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, atomic_read(&dest->persistconns)); if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) goto nla_put_failure; nla_nest_end(skb, nl_dest); return 0; nla_put_failure: nla_nest_cancel(skb, nl_dest); return -EMSGSIZE; } static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_DEST); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_dest(skb, dest) < 0) goto nla_put_failure; return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_dests(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; int start = cb->args[0]; struct ip_vs_service *svc; struct ip_vs_dest *dest; struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; mutex_lock(&__ip_vs_mutex); /* Try to find the service for which to dump destinations */ if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy)) goto out_err; svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR(svc) || svc == NULL) goto out_err; /* Dump the destinations */ list_for_each_entry(dest, &svc->destinations, n_list) { if (++idx <= start) continue; if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) { idx--; goto nla_put_failure; } } nla_put_failure: cb->args[0] = idx; ip_vs_service_put(svc); out_err: mutex_unlock(&__ip_vs_mutex); return skb->len; } static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, struct nlattr *nla, int full_entry) { struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1]; struct nlattr *nla_addr, *nla_port; /* Parse mandatory identifying destination fields first */ if (nla == NULL || nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, 
ip_vs_dest_policy)) return -EINVAL; nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; nla_port = attrs[IPVS_DEST_ATTR_PORT]; if (!(nla_addr && nla_port)) return -EINVAL; memset(udest, 0, sizeof(*udest)); nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); udest->port = nla_get_u16(nla_port); /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, *nla_l_thresh; nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD]; nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT]; nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH]; nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH]; if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh)) return -EINVAL; udest->conn_flags = nla_get_u32(nla_fwd) & IP_VS_CONN_F_FWD_MASK; udest->weight = nla_get_u32(nla_weight); udest->u_threshold = nla_get_u32(nla_u_thresh); udest->l_threshold = nla_get_u32(nla_l_thresh); } return 0; } static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state, const char *mcast_ifn, __be32 syncid) { struct nlattr *nl_daemon; nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON); if (!nl_daemon) return -EMSGSIZE; NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); nla_nest_end(skb, nl_daemon); return 0; nla_put_failure: nla_nest_cancel(skb, nl_daemon); return -EMSGSIZE; } static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state, const char *mcast_ifn, __be32 syncid, struct netlink_callback *cb) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, &ip_vs_genl_family, NLM_F_MULTI, IPVS_CMD_NEW_DAEMON); if (!hdr) return -EMSGSIZE; if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid)) goto nla_put_failure; return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int ip_vs_genl_dump_daemons(struct sk_buff *skb, struct netlink_callback *cb) { mutex_lock(&__ip_vs_mutex); if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, ip_vs_master_mcast_ifn, ip_vs_master_syncid, cb) < 0) goto nla_put_failure; cb->args[0] = 1; } if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP, ip_vs_backup_mcast_ifn, ip_vs_backup_syncid, cb) < 0) goto nla_put_failure; cb->args[1] = 1; } nla_put_failure: mutex_unlock(&__ip_vs_mutex); return skb->len; } static int ip_vs_genl_new_daemon(struct nlattr **attrs) { if (!(attrs[IPVS_DAEMON_ATTR_STATE] && attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && attrs[IPVS_DAEMON_ATTR_SYNC_ID])) return -EINVAL; return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID])); } static int ip_vs_genl_del_daemon(struct nlattr **attrs) { if (!attrs[IPVS_DAEMON_ATTR_STATE]) return -EINVAL; return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); } static int ip_vs_genl_set_config(struct nlattr **attrs) { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(&t); if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]) t.tcp_fin_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]); if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); return ip_vs_set_timeout(&t); } static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) { 
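/*
	 * Dispatch point for all Generic Netlink "set"-type commands.  Global
	 * commands (FLUSH, SET_CONFIG, NEW/DEL_DAEMON, and ZERO without a
	 * service attribute) are handled up front; the remaining commands
	 * require a service argument, and the *_DEST commands a destination
	 * argument as well.
	 */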
struct ip_vs_service *svc = NULL; struct ip_vs_service_user_kern usvc; struct ip_vs_dest_user_kern udest; int ret = 0, cmd; int need_full_svc = 0, need_full_dest = 0; cmd = info->genlhdr->cmd; mutex_lock(&__ip_vs_mutex); if (cmd == IPVS_CMD_FLUSH) { ret = ip_vs_flush(); goto out; } else if (cmd == IPVS_CMD_SET_CONFIG) { ret = ip_vs_genl_set_config(info->attrs); goto out; } else if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], ip_vs_daemon_policy)) { ret = -EINVAL; goto out; } if (cmd == IPVS_CMD_NEW_DAEMON) ret = ip_vs_genl_new_daemon(daemon_attrs); else ret = ip_vs_genl_del_daemon(daemon_attrs); goto out; } else if (cmd == IPVS_CMD_ZERO && !info->attrs[IPVS_CMD_ATTR_SERVICE]) { ret = ip_vs_zero_all(); goto out; } /* All following commands require a service argument, so check if we * received a valid one. We need a full service specification when * adding / editing a service. Only identifying members otherwise. */ if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE) need_full_svc = 1; ret = ip_vs_genl_parse_service(&usvc, info->attrs[IPVS_CMD_ATTR_SERVICE], need_full_svc); if (ret) goto out; /* Lookup the exact service by <protocol, addr, port> or fwmark */ if (usvc.fwmark == 0) svc = __ip_vs_service_get(usvc.af, usvc.protocol, &usvc.addr, usvc.port); else svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); /* Unless we're adding a new service, the service must already exist */ if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) { ret = -ESRCH; goto out; } /* Destination commands require a valid destination argument. For * adding / editing a destination, we need a full destination * specification. 
*/ if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST || cmd == IPVS_CMD_DEL_DEST) { if (cmd != IPVS_CMD_DEL_DEST) need_full_dest = 1; ret = ip_vs_genl_parse_dest(&udest, info->attrs[IPVS_CMD_ATTR_DEST], need_full_dest); if (ret) goto out; } switch (cmd) { case IPVS_CMD_NEW_SERVICE: if (svc == NULL) ret = ip_vs_add_service(&usvc, &svc); else ret = -EEXIST; break; case IPVS_CMD_SET_SERVICE: ret = ip_vs_edit_service(svc, &usvc); break; case IPVS_CMD_DEL_SERVICE: ret = ip_vs_del_service(svc); break; case IPVS_CMD_NEW_DEST: ret = ip_vs_add_dest(svc, &udest); break; case IPVS_CMD_SET_DEST: ret = ip_vs_edit_dest(svc, &udest); break; case IPVS_CMD_DEL_DEST: ret = ip_vs_del_dest(svc, &udest); break; case IPVS_CMD_ZERO: ret = ip_vs_zero_service(svc); break; default: ret = -EINVAL; } out: if (svc) ip_vs_service_put(svc); mutex_unlock(&__ip_vs_mutex); return ret; } static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; void *reply; int ret, cmd, reply_cmd; cmd = info->genlhdr->cmd; if (cmd == IPVS_CMD_GET_SERVICE) reply_cmd = IPVS_CMD_NEW_SERVICE; else if (cmd == IPVS_CMD_GET_INFO) reply_cmd = IPVS_CMD_SET_INFO; else if (cmd == IPVS_CMD_GET_CONFIG) reply_cmd = IPVS_CMD_SET_CONFIG; else { pr_err("unknown Generic Netlink command\n"); return -EINVAL; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; mutex_lock(&__ip_vs_mutex); reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd); if (reply == NULL) goto nla_put_failure; switch (cmd) { case IPVS_CMD_GET_SERVICE: { struct ip_vs_service *svc; svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]); if (IS_ERR(svc)) { ret = PTR_ERR(svc); goto out_err; } else if (svc) { ret = ip_vs_genl_fill_service(msg, svc); ip_vs_service_put(svc); if (ret) goto nla_put_failure; } else { ret = -ESRCH; goto out_err; } break; } case IPVS_CMD_GET_CONFIG: { struct ip_vs_timeout_user t; __ip_vs_get_timeouts(&t); #ifdef CONFIG_IP_VS_PROTO_TCP NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, t.tcp_fin_timeout); #endif #ifdef CONFIG_IP_VS_PROTO_UDP NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); #endif break; } case IPVS_CMD_GET_INFO: NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, IP_VS_CONN_TAB_SIZE); break; } genlmsg_end(msg, reply); ret = genlmsg_reply(msg, info); goto out; nla_put_failure: pr_err("not enough space in Netlink message\n"); ret = -EMSGSIZE; out_err: nlmsg_free(msg); out: mutex_unlock(&__ip_vs_mutex); return ret; } static struct genl_ops ip_vs_genl_ops[] __read_mostly = { { .cmd = IPVS_CMD_NEW_SERVICE, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_SET_SERVICE, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_DEL_SERVICE, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_GET_SERVICE, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_get_cmd, .dumpit = ip_vs_genl_dump_services, .policy = ip_vs_cmd_policy, }, { .cmd = IPVS_CMD_NEW_DEST, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_SET_DEST, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_DEL_DEST, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_GET_DEST, .flags = 
GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .dumpit = ip_vs_genl_dump_dests, }, { .cmd = IPVS_CMD_NEW_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_DEL_DAEMON, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_GET_DAEMON, .flags = GENL_ADMIN_PERM, .dumpit = ip_vs_genl_dump_daemons, }, { .cmd = IPVS_CMD_SET_CONFIG, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_GET_CONFIG, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_get_cmd, }, { .cmd = IPVS_CMD_GET_INFO, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_get_cmd, }, { .cmd = IPVS_CMD_ZERO, .flags = GENL_ADMIN_PERM, .policy = ip_vs_cmd_policy, .doit = ip_vs_genl_set_cmd, }, { .cmd = IPVS_CMD_FLUSH, .flags = GENL_ADMIN_PERM, .doit = ip_vs_genl_set_cmd, }, }; static int __init ip_vs_genl_register(void) { return genl_register_family_with_ops(&ip_vs_genl_family, ip_vs_genl_ops, ARRAY_SIZE(ip_vs_genl_ops)); } static void ip_vs_genl_unregister(void) { genl_unregister_family(&ip_vs_genl_family); } /* End of Generic Netlink interface definitions */ int __init ip_vs_control_init(void) { int ret; int idx; EnterFunction(2); ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); return ret; } ret = ip_vs_genl_register(); if (ret) { pr_err("cannot register Generic Netlink interface.\n"); nf_unregister_sockopt(&ip_vs_sockopts); return ret; } proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars); /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */ for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { INIT_LIST_HEAD(&ip_vs_svc_table[idx]); INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); } for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) { INIT_LIST_HEAD(&ip_vs_rtable[idx]); } ip_vs_new_estimator(&ip_vs_stats); /* Hook the defense timer */ schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD); LeaveFunction(2); return 0; } void ip_vs_control_cleanup(void) { EnterFunction(2); ip_vs_trash_cleanup(); cancel_rearming_delayed_work(&defense_work); cancel_work_sync(&defense_work.work); ip_vs_kill_estimator(&ip_vs_stats); unregister_sysctl_table(sysctl_header); proc_net_remove(&init_net, "ip_vs_stats"); proc_net_remove(&init_net, "ip_vs"); ip_vs_genl_unregister(); nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); }
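/*
 * Hedged userspace sketch (not part of this file): the legacy sockopt
 * interface registered above is reached the way ipvsadm reaches it, e.g.:
 *
 *     char buf[64];
 *     socklen_t len = sizeof(buf);
 *     int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); // needs CAP_NET_ADMIN
 *     if (fd >= 0 &&
 *         getsockopt(fd, IPPROTO_IP, IP_VS_SO_GET_VERSION, buf, &len) == 0)
 *             printf("%s\n", buf);
 *
 * IP_VS_SO_GET_VERSION requires len >= 64 (see get_arglen above), and the
 * reply is the NUL-terminated version string built in do_ip_vs_get_ctl().
 */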
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % JJJJJ PPPP EEEEE GGGG % % J P P E G % % J PPPP EEE G GG % % J J P E G G % % JJJ P EEEEE GGG % % % % % % Read/Write JPEG Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This software is based in part on the work of the Independent JPEG Group. % See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and % licensing restrictions. Blob support contributed by Glenn Randers-Pehrson. % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap-private.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/xml-tree.h" #include <setjmp.h> #if defined(MAGICKCORE_JPEG_DELEGATE) #define JPEG_INTERNAL_OPTIONS #if defined(__MINGW32__) || defined(__MINGW64__) # define XMD_H 1 /* Avoid conflicting typedef for INT32 */ #endif #undef HAVE_STDLIB_H #include "jpeglib.h" #include "jerror.h" #endif /* Define declarations. */ #define ICC_MARKER (JPEG_APP0+2) #define ICC_PROFILE "ICC_PROFILE" #define IPTC_MARKER (JPEG_APP0+13) #define XML_MARKER (JPEG_APP0+1) #define MaxBufferExtent 16384 /* Typedef declarations. 
*/ #if defined(MAGICKCORE_JPEG_DELEGATE) typedef struct _DestinationManager { struct jpeg_destination_mgr manager; Image *image; JOCTET *buffer; } DestinationManager; typedef struct _ErrorManager { Image *image; MagickBooleanType finished; StringInfo *profile; jmp_buf error_recovery; } ErrorManager; typedef struct _SourceManager { struct jpeg_source_mgr manager; Image *image; JOCTET *buffer; boolean start_of_blob; } SourceManager; #endif typedef struct _QuantizationTable { char *slot, *description; size_t width, height; double divisor; unsigned int *levels; } QuantizationTable; /* Forward declarations. */ #if defined(MAGICKCORE_JPEG_DELEGATE) static MagickBooleanType WriteJPEGImage(const ImageInfo *,Image *); #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J P E G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJPEG() returns MagickTrue if the image format type, identified by the % magick string, is JPEG. % % The format of the IsJPEG method is: % % MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) { if (length < 3) return(MagickFalse); if (memcmp(magick,"\377\330\377",3) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJPEGImage() reads a JPEG image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer to % the new image. % % The format of the ReadJPEGImage method is: % % Image *ReadJPEGImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
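%
%  A minimal caller sketch (hedged: ReadJPEGImage() is static here, so an
%  application normally reaches it through the generic ReadImage() API):
%
%    ExceptionInfo *exception = AcquireExceptionInfo();
%    ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
%    (void) CopyMagickString(image_info->filename,"input.jpg",MaxTextExtent);
%    Image *image = ReadImage(image_info,exception);
%    if (image != (Image *) NULL)
%      image = DestroyImage(image);
%    image_info = DestroyImageInfo(image_info);
%    exception = DestroyExceptionInfo(exception);
%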
% */ static boolean FillInputBuffer(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image, MaxBufferExtent,source->buffer); if (source->manager.bytes_in_buffer == 0) { if (source->start_of_blob != FALSE) ERREXIT(cinfo,JERR_INPUT_EMPTY); WARNMS(cinfo,JWRN_JPEG_EOF); source->buffer[0]=(JOCTET) 0xff; source->buffer[1]=(JOCTET) JPEG_EOI; source->manager.bytes_in_buffer=2; } source->manager.next_input_byte=source->buffer; source->start_of_blob=FALSE; return(TRUE); } static int GetCharacter(j_decompress_ptr jpeg_info) { if (jpeg_info->src->bytes_in_buffer == 0) (void) (*jpeg_info->src->fill_input_buffer)(jpeg_info); jpeg_info->src->bytes_in_buffer--; return((int) GETJOCTET(*jpeg_info->src->next_input_byte++)); } static void InitializeSource(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->start_of_blob=TRUE; } static MagickBooleanType IsITUFaxImage(const Image *image) { const StringInfo *profile; const unsigned char *datum; profile=GetImageProfile(image,"8bim"); if (profile == (const StringInfo *) NULL) return(MagickFalse); if (GetStringInfoLength(profile) < 5) return(MagickFalse); datum=GetStringInfoDatum(profile); if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) && (datum[3] == 0x41) && (datum[4] == 0x58)) return(MagickTrue); return(MagickFalse); } static void JPEGErrorHandler(j_common_ptr jpeg_info) { char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; (jpeg_info->err->format_message)(jpeg_info,message); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); if (error_manager->finished != MagickFalse) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageWarning,(char *) message,"`%s'",image->filename); else (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,(char *) message,"`%s'",image->filename); longjmp(error_manager->error_recovery,1); } static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level) { #define JPEGExcessiveWarnings 1000 char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; if (level < 0) { /* Process warning message. */ (jpeg_info->err->format_message)(jpeg_info,message); if (jpeg_info->err->num_warnings++ > JPEGExcessiveWarnings) JPEGErrorHandler(jpeg_info); ThrowBinaryException(CorruptImageWarning,(char *) message, image->filename); } else if ((image->debug != MagickFalse) && (level >= jpeg_info->err->trace_level)) { /* Process trace message. */ (jpeg_info->err->format_message)(jpeg_info,message); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); } return(MagickTrue); } static boolean ReadComment(j_decompress_ptr jpeg_info) { ErrorManager *error_manager; Image *image; register unsigned char *p; register ssize_t i; size_t length; StringInfo *comment; /* Determine length of comment. 
*/
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  comment=BlobToStringInfo((const void *) NULL,length);
  if (comment == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  /*
    Read comment.
  */
  error_manager->profile=comment;
  p=GetStringInfoDatum(comment);
  for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  *p='\0';
  error_manager->profile=NULL;
  p=GetStringInfoDatum(comment);
  (void) SetImageProperty(image,"comment",(const char *) p);
  comment=DestroyStringInfo(comment);
  return(TRUE);
}

static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[12];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *icc_profile,
    *profile;

  /*
    Read color profile.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);  /* guard against size_t underflow on malformed lengths */
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  for (i=0; i < 12; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  if (LocaleCompare(magick,ICC_PROFILE) != 0)
    {
      /*
        Not an ICC profile, return.
      */
      for (i=0; i < (ssize_t) (length-12); i++)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  (void) GetCharacter(jpeg_info);  /* id */
  (void) GetCharacter(jpeg_info);  /* markers */
  length-=14;
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  icc_profile=(StringInfo *) GetImageProfile(image,"icc");
  if (icc_profile != (StringInfo *) NULL)
    {
      ConcatenateStringInfo(icc_profile,profile);
      profile=DestroyStringInfo(profile);
    }
  else
    {
      status=SetImageProfile(image,"icc",profile);
      profile=DestroyStringInfo(profile);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(FALSE);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: ICC, %.20g bytes",(double) length);
  return(TRUE);
}

static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[MaxTextExtent];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *iptc_profile,
    *profile;

  /*
    Determine length of binary data stored here.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);  /* guard against size_t underflow on malformed lengths */
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  /*
    Validate that this was written as a Photoshop resource format slug.
  */
  for (i=0; i < 10; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  magick[10]='\0';
  length-=10;
  if (length <= 10)
    return(TRUE);
  if (LocaleCompare(magick,"Photoshop ") != 0)
    {
      /*
        Not an IPTC profile, return.
*/ for (i=0; i < (ssize_t) length; i++) (void) GetCharacter(jpeg_info); return(TRUE); } /* Remove the version number. */ for (i=0; i < 4; i++) (void) GetCharacter(jpeg_info); if (length <= 11) return(TRUE); length-=4; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; iptc_profile=(StringInfo *) GetImageProfile(image,"8bim"); if (iptc_profile != (StringInfo *) NULL) { ConcatenateStringInfo(iptc_profile,profile); profile=DestroyStringInfo(profile); } else { status=SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: iptc, %.20g bytes",(double) length); return(TRUE); } static boolean ReadProfile(j_decompress_ptr jpeg_info) { char name[MaxTextExtent]; const StringInfo *previous_profile; ErrorManager *error_manager; Image *image; int marker; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *profile; /* Read generic profile. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); if (length <= 2) return(TRUE); length-=2; marker=jpeg_info->unread_marker-JPEG_APP0; (void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker); error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; if (marker == 1) { p=GetStringInfoDatum(profile); if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0)) (void) CopyMagickString(name,"exif",MaxTextExtent); if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0)) { ssize_t j; /* Extract namespace from XMP profile. 
*/ p=GetStringInfoDatum(profile); for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++) { if (*p == '\0') break; p++; } if (j < (ssize_t) GetStringInfoLength(profile)) (void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1))); (void) CopyMagickString(name,"xmp",MaxTextExtent); } } previous_profile=GetImageProfile(image,name); if (previous_profile != (const StringInfo *) NULL) { size_t length; length=GetStringInfoLength(profile); SetStringInfoLength(profile,GetStringInfoLength(profile)+ GetStringInfoLength(previous_profile)); (void) memmove(GetStringInfoDatum(profile)+ GetStringInfoLength(previous_profile),GetStringInfoDatum(profile), length); (void) memcpy(GetStringInfoDatum(profile), GetStringInfoDatum(previous_profile), GetStringInfoLength(previous_profile)); } status=SetImageProfile(image,name,profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: %s, %.20g bytes",name,(double) length); return(TRUE); } static void SkipInputData(j_decompress_ptr cinfo,long number_bytes) { SourceManager *source; if (number_bytes <= 0) return; source=(SourceManager *) cinfo->src; while (number_bytes > (long) source->manager.bytes_in_buffer) { number_bytes-=(long) source->manager.bytes_in_buffer; (void) FillInputBuffer(cinfo); } source->manager.next_input_byte+=number_bytes; source->manager.bytes_in_buffer-=number_bytes; } static void TerminateSource(j_decompress_ptr cinfo) { (void) cinfo; } static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image) { SourceManager *source; cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager)); source=(SourceManager *) cinfo->src; source->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); source=(SourceManager *) cinfo->src; source->manager.init_source=InitializeSource; source->manager.fill_input_buffer=FillInputBuffer; source->manager.skip_input_data=SkipInputData; source->manager.resync_to_restart=jpeg_resync_to_restart; source->manager.term_source=TerminateSource; source->manager.bytes_in_buffer=0; source->manager.next_input_byte=NULL; source->image=image; } static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info, Image *image) { image->quality=UndefinedCompressionQuality; #if defined(D_PROGRESSIVE_SUPPORTED) if (image->compression == LosslessJPEGCompression) { image->quality=100; (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: 100 (lossless)"); } else #endif { ssize_t j, qvalue, sum; register ssize_t i; /* Determine the JPEG compression quality from the quantization tables. 
*/ sum=0; for (i=0; i < NUM_QUANT_TBLS; i++) { if (jpeg_info->quant_tbl_ptrs[i] != NULL) for (j=0; j < DCTSIZE2; j++) sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j]; } if ((jpeg_info->quant_tbl_ptrs[0] != NULL) && (jpeg_info->quant_tbl_ptrs[1] != NULL)) { ssize_t hash[101] = { 1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645, 632, 623, 613, 607, 600, 594, 589, 585, 581, 571, 555, 542, 529, 514, 494, 474, 457, 439, 424, 410, 397, 386, 373, 364, 351, 341, 334, 324, 317, 309, 299, 294, 287, 279, 274, 267, 262, 257, 251, 247, 243, 237, 232, 227, 222, 217, 213, 207, 202, 198, 192, 188, 183, 177, 173, 168, 163, 157, 153, 148, 143, 139, 132, 128, 125, 119, 115, 108, 104, 99, 94, 90, 84, 79, 74, 70, 64, 59, 55, 49, 45, 40, 34, 30, 25, 20, 15, 11, 6, 4, 0 }, sums[101] = { 32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104, 27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946, 23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998, 16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702, 12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208, 9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846, 1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]+ jpeg_info->quant_tbl_ptrs[1]->quantval[0]+ jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? 
"exact" : "approximate"); break; } } else if (jpeg_info->quant_tbl_ptrs[0] != NULL) { ssize_t hash[101] = { 510, 505, 422, 380, 355, 338, 326, 318, 311, 305, 300, 297, 293, 291, 288, 286, 284, 283, 281, 280, 279, 278, 277, 273, 262, 251, 243, 233, 225, 218, 211, 205, 198, 193, 186, 181, 177, 172, 168, 164, 158, 156, 152, 148, 145, 142, 139, 136, 133, 131, 129, 126, 123, 120, 118, 115, 113, 110, 107, 105, 102, 100, 97, 94, 92, 89, 87, 83, 81, 79, 76, 74, 70, 68, 66, 63, 61, 57, 55, 52, 50, 48, 44, 42, 39, 37, 34, 31, 29, 26, 24, 21, 18, 16, 13, 11, 8, 6, 3, 2, 0 }, sums[101] = { 16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859, 12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679, 9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736, 667, 592, 518, 441, 369, 292, 221, 151, 86, 64, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? "exact" : "approximate"); break; } } } } static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info, Image *image) { char sampling_factor[MaxTextExtent]; switch (jpeg_info->out_color_space) { case JCS_CMYK: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } case JCS_GRAYSCALE: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAYSCALE"); (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor); break; } case JCS_RGB: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", jpeg_info->out_color_space); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } } (void) 
SetImageProperty(image,"jpeg:sampling-factor",sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s", sampling_factor); } static Image *ReadJPEGImage(const ImageInfo *image_info, ExceptionInfo *exception) { char value[MaxTextExtent]; const char *option; ErrorManager error_manager; Image *image; IndexPacket index; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType debug, status; MagickSizeType number_pixels; MemoryInfo *memory_info; register ssize_t i; struct jpeg_decompress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; register JSAMPLE *p; size_t units; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); debug=IsEventLogging(); (void) debug; image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize JPEG parameters. */ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
      TRUE : FALSE;
  (void) jpeg_start_decompress(&jpeg_info);
  image->columns=jpeg_info.output_width;
  image->rows=jpeg_info.output_height;
  image->depth=(size_t) jpeg_info.data_precision;
  switch (jpeg_info.out_color_space)
  {
    case JCS_RGB:
    default:
    {
      (void) SetImageColorspace(image,sRGBColorspace);
      break;
    }
    case JCS_GRAYSCALE:
    {
      (void) SetImageColorspace(image,GRAYColorspace);
      break;
    }
    case JCS_YCbCr:
    {
      (void) SetImageColorspace(image,YCbCrColorspace);
      break;
    }
    case JCS_CMYK:
    {
      (void) SetImageColorspace(image,CMYKColorspace);
      break;
    }
  }
  if (IsITUFaxImage(image) != MagickFalse)
    {
      (void) SetImageColorspace(image,LabColorspace);
      jpeg_info.out_color_space=JCS_YCbCr;
    }
  option=GetImageOption(image_info,"jpeg:colors");
  if (option != (const char *) NULL)
    if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse)
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0))
    {
      size_t
        colors;

      colors=(size_t) GetQuantumRange(image->depth)+1;
      if (AcquireImageColormap(image,colors) == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    }
  if (image->debug != MagickFalse)
    {
      if (image->interlace != NoInterlace)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: progressive");
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: nonprogressive");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d",
        (int) jpeg_info.data_precision);
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d",
        (int) jpeg_info.output_width,(int) jpeg_info.output_height);
    }
  JPEGSetImageQuality(&jpeg_info,image);
  JPEGSetImageSamplingFactor(&jpeg_info,image);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    jpeg_info.out_color_space);
  (void) SetImageProperty(image,"jpeg:colorspace",value);
  if (image_info->ping != MagickFalse)
    {
      jpeg_destroy_decompress(&jpeg_info);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      jpeg_destroy_decompress(&jpeg_info);
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  if ((jpeg_info.output_components != 1) &&
      (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4))
    {
      jpeg_destroy_decompress(&jpeg_info);
      ThrowReaderException(CorruptImageError,"ImageTypeNotSupported");
    }
  memory_info=AcquireVirtualMemory((size_t) image->columns,
    jpeg_info.output_components*sizeof(*jpeg_pixels));
  if (memory_info == (MemoryInfo *) NULL)
    {
      jpeg_destroy_decompress(&jpeg_info);
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    }
  jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
  /*
    Convert JPEG pixels to pixel packets.
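
    (An aside on the mechanics below: the setjmp() target that follows lets
    the libjpeg error handler unwind out of jpeg_read_scanlines() without
    leaking the scanline buffer.  For a 12-bit stream the scale factor used
    further down works out as

        scale = 65535 / GetQuantumRange(12) = 65535 / 4095 = 16

    so each 12-bit sample is widened to 16 bits before ScaleShortToQuantum()
    maps it onto Quantum.)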
*/ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterJPEGImage() adds properties for the JPEG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterJPEGImage method is: % % size_t RegisterJPEGImage(void) % */ ModuleExport size_t RegisterJPEGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char description[] = "Joint Photographic Experts Group JFIF format"; *version='\0'; #if defined(JPEG_LIB_VERSION) (void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION); #endif entry=SetMagickInfo("JPE"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); 
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("JPS");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PJPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r J P E G I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterJPEGImage() removes format registrations made by the
%  JPEG module from the list of supported formats.
%
%  The format of the UnregisterJPEGImage method is:
%
%      UnregisterJPEGImage(void)
%
*/
ModuleExport void UnregisterJPEGImage(void)
{
  (void) UnregisterMagickInfo("PJPEG");
  (void) UnregisterMagickInfo("JPS");
  (void) UnregisterMagickInfo("JPG");
  (void) UnregisterMagickInfo("JPEG");
  (void) UnregisterMagickInfo("JPE");
}

#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e J P E G I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteJPEGImage() writes an image in the JPEG format to a file or blob,
%  and returns MagickTrue on success, MagickFalse otherwise.
%
%  The format of the WriteJPEGImage method is:
%
%      MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
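%
%  A minimal usage sketch (hypothetical caller shown for orientation only;
%  since this encoder is static, applications normally reach it through
%  WriteImage() via the format registrations above):
%
%      ImageInfo *info=CloneImageInfo((ImageInfo *) NULL);
%      (void) CopyMagickString(image->filename,"output.jpg",MaxTextExtent);
%      if (WriteJPEGImage(info,image) == MagickFalse)
%        CatchException(&image->exception);
%      info=DestroyImageInfo(info);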
% % */ static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table) { assert(table != (QuantizationTable *) NULL); if (table->slot != (char *) NULL) table->slot=DestroyString(table->slot); if (table->description != (char *) NULL) table->description=DestroyString(table->description); if (table->levels != (unsigned int *) NULL) table->levels=(unsigned int *) RelinquishMagickMemory(table->levels); table=(QuantizationTable *) RelinquishMagickMemory(table); return(table); } static boolean EmptyOutputBuffer(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image, MaxBufferExtent,destination->buffer); if (destination->manager.free_in_buffer != MaxBufferExtent) ERREXIT(cinfo,JERR_FILE_WRITE); destination->manager.next_output_byte=destination->buffer; return(TRUE); } static QuantizationTable *GetQuantizationTable(const char *filename, const char *slot,ExceptionInfo *exception) { char *p, *xml; const char *attribute, *content; double value; register ssize_t i; QuantizationTable *table; size_t length; ssize_t j; XMLTreeInfo *description, *levels, *quantization_tables, *table_iterator; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading quantization tables \"%s\" ...",filename); table=(QuantizationTable *) NULL; xml=FileToString(filename,~0UL,exception); if (xml == (char *) NULL) return(table); quantization_tables=NewXMLTree(xml,exception); if (quantization_tables == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } for (table_iterator=GetXMLTreeChild(quantization_tables,"table"); table_iterator != (XMLTreeInfo *) NULL; table_iterator=GetNextXMLTreeTag(table_iterator)) { attribute=GetXMLTreeAttribute(table_iterator,"slot"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; attribute=GetXMLTreeAttribute(table_iterator,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; } if (table_iterator == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } description=GetXMLTreeChild(table_iterator,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<description>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } levels=GetXMLTreeChild(table_iterator,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<levels>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table)); if (table == (QuantizationTable *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); table->slot=(char *) NULL; table->description=(char *) NULL; table->levels=(unsigned int *) NULL; attribute=GetXMLTreeAttribute(table_iterator,"slot"); if (attribute != (char *) NULL) table->slot=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) table->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels width>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); 
table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->width=StringToUnsignedLong(attribute); if (table->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels width>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->height=StringToUnsignedLong(attribute); if (table->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->divisor=InterpretLocaleValue(attribute,(char **) NULL); if (table->divisor == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent","<levels>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } length=(size_t) table->width*table->height; if (length < 64) length=64; table->levels=(unsigned int *) AcquireQuantumMemory(length, sizeof(*table->levels)); if (table->levels == (unsigned int *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); for (i=0; i < (ssize_t) (table->width*table->height); i++) { table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/ table->divisor+0.5); while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; content=p; } value=InterpretLocaleValue(content,&p); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent","<level> too many values, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } for (j=i; j < 64; j++) table->levels[j]=table->levels[j-1]; quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } static void InitializeDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); destination->manager.next_output_byte=destination->buffer; destination->manager.free_in_buffer=MaxBufferExtent; } static 
void TerminateDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0) { ssize_t count; count=WriteBlob(destination->image,MaxBufferExtent- destination->manager.free_in_buffer,destination->buffer); if (count != (ssize_t) (MaxBufferExtent-destination->manager.free_in_buffer)) ERREXIT(cinfo,JERR_FILE_WRITE); } } static void WriteProfile(j_compress_ptr jpeg_info,Image *image) { const char *name; const StringInfo *profile; MagickBooleanType iptc; register ssize_t i; size_t length, tag_length; StringInfo *custom_profile; /* Save image profile as a APP marker. */ iptc=MagickFalse; custom_profile=AcquireStringInfo(65535L); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { register unsigned char *p; profile=GetImageProfile(image,name); p=GetStringInfoDatum(custom_profile); if (LocaleCompare(name,"EXIF") == 0) for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65533L) { length=MagickMin(GetStringInfoLength(profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile)+i, (unsigned int) length); } if (LocaleCompare(name,"ICC") == 0) { register unsigned char *p; tag_length=strlen(ICC_PROFILE); p=GetStringInfoDatum(custom_profile); (void) CopyMagickMemory(p,ICC_PROFILE,tag_length); p[tag_length]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L) { length=MagickMin(GetStringInfoLength(profile)-i,65519L); p[12]=(unsigned char) ((i/65519L)+1); p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1); (void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i, length); jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+3)); } } if (((LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse)) { size_t roundup; iptc=MagickTrue; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L) { length=MagickMin(GetStringInfoLength(profile)-i,65500L); roundup=(size_t) (length & 0x01); if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0) { (void) memcpy(p,"Photoshop 3.0 ",14); tag_length=14; } else { (void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24); tag_length=26; p[24]=(unsigned char) (length >> 8); p[25]=(unsigned char) (length & 0xff); } p[13]=0x00; (void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length); if (roundup != 0) p[length+tag_length]='\0'; jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+roundup)); } } if (LocaleCompare(name,"XMP") == 0) { StringInfo *xmp_profile; /* Add namespace to XMP profile. 
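
    The namespace header "http://ns.adobe.com/xap/1.0/ " is 29 bytes once the
    trailing blank is rewritten to NUL below; a JPEG marker segment's 2-byte
    length field caps the payload at 65533 bytes, which is why the write
    loop that follows steps through the profile in 65533-byte chunks.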
*/ xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ "); if (xmp_profile != (StringInfo *) NULL) { if (profile != (StringInfo *) NULL) ConcatenateStringInfo(xmp_profile,profile); GetStringInfoDatum(xmp_profile)[28]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L) { length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER, GetStringInfoDatum(xmp_profile)+i,(unsigned int) length); } xmp_profile=DestroyStringInfo(xmp_profile); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), "%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile)); name=GetNextImageProfile(image); } custom_profile=DestroyStringInfo(custom_profile); } static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image) { DestinationManager *destination; cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager)); destination=(DestinationManager *) cinfo->dest; destination->manager.init_destination=InitializeDestination; destination->manager.empty_output_buffer=EmptyOutputBuffer; destination->manager.term_destination=TerminateDestination; destination->image=image; } static char **SamplingFactorToList(const char *text) { char **textlist; register char *q; register const char *p; register ssize_t i; if (text == (char *) NULL) return((char **) NULL); /* Convert string to an ASCII list. */ textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS, sizeof(*textlist)); if (textlist == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); p=text; for (i=0; i < (ssize_t) MAX_COMPONENTS; i++) { for (q=(char *) p; *q != '\0'; q++) if (*q == ',') break; textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent, sizeof(*textlist[i])); if (textlist[i] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); (void) CopyMagickString(textlist[i],p,(size_t) (q-p+1)); if (*q == '\r') q++; if (*q == '\0') break; p=q+1; } for (i++; i < (ssize_t) MAX_COMPONENTS; i++) textlist[i]=ConstantString("1x1"); return(textlist); } static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, Image *image) { const char *option, *sampling_factor, *value; ErrorManager error_manager; ExceptionInfo *exception; Image *volatile volatile_image; int colorspace, quality; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType status; MemoryInfo *memory_info; register JSAMPLE *q; register ssize_t i; ssize_t y; struct jpeg_compress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; unsigned short scale; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if ((LocaleCompare(image_info->magick,"JPS") == 0) && (image->next != (Image *) NULL)) image=AppendImages(image,MagickFalse,exception); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); /* Initialize JPEG parameters. 
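
    (Portability note on the volatile qualifiers below: any local object
    modified between setjmp() and a later longjmp() has an indeterminate
    value afterwards unless it is volatile-qualified (C99 7.13.2.1), which
    is why the image pointer is mirrored into volatile_image and
    jpeg_pixels is declared "JSAMPLE *volatile".)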
*/ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); volatile_image=image; jpeg_info.client_data=(void *) volatile_image; jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; error_manager.image=volatile_image; memory_info=(MemoryInfo *) NULL; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); (void) CloseBlob(volatile_image); return(MagickFalse); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_compress(&jpeg_info); JPEGDestinationManager(&jpeg_info,image); if ((image->columns != (unsigned int) image->columns) || (image->rows != (unsigned int) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); jpeg_info.image_width=(unsigned int) image->columns; jpeg_info.image_height=(unsigned int) image->rows; jpeg_info.input_components=3; jpeg_info.data_precision=8; jpeg_info.in_color_space=JCS_RGB; switch (image->colorspace) { case CMYKColorspace: { jpeg_info.input_components=4; jpeg_info.in_color_space=JCS_CMYK; break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { jpeg_info.in_color_space=JCS_YCbCr; break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { if (image_info->type == TrueColorType) break; jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; break; } default: { (void) TransformImageColorspace(image,sRGBColorspace); if (image_info->type == TrueColorType) break; if (SetImageGray(image,&image->exception) != MagickFalse) { jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; } break; } } jpeg_set_defaults(&jpeg_info); if (jpeg_info.in_color_space == JCS_CMYK) jpeg_set_colorspace(&jpeg_info,JCS_YCCK); if ((jpeg_info.data_precision != 12) && (image->depth <= 8)) jpeg_info.data_precision=8; else jpeg_info.data_precision=BITS_IN_JSAMPLE; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { /* Set image resolution. */ jpeg_info.write_JFIF_header=TRUE; jpeg_info.X_density=(UINT16) image->x_resolution; jpeg_info.Y_density=(UINT16) image->y_resolution; /* Set image resolution units. */ if (image->units == PixelsPerInchResolution) jpeg_info.density_unit=(UINT8) 1; if (image->units == PixelsPerCentimeterResolution) jpeg_info.density_unit=(UINT8) 2; } jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:optimize-coding"); if (option != (const char *) NULL) jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; else { MagickSizeType length; length=(MagickSizeType) jpeg_info.input_components*image->columns* image->rows*sizeof(JSAMPLE); if (length == (MagickSizeType) ((size_t) length)) { /* Perform optimization only if available memory resources permit it. */ status=AcquireMagickResource(MemoryResource,length); RelinquishMagickResource(MemoryResource,length); jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE; } } #if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED) if ((LocaleCompare(image_info->magick,"PJPEG") == 0) || (image_info->interlace != NoInterlace)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); jpeg_simple_progression(&jpeg_info); } else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: non-progressive"); #else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); #endif quality=92; if ((image_info->compression != LosslessJPEGCompression) && (image->quality <= 100)) { if (image->quality != UndefinedCompressionQuality) quality=(int) image->quality; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g", (double) image->quality); } else { #if !defined(C_LOSSLESS_SUPPORTED) quality=100; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100"); #else if (image->quality < 100) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"LosslessToLossyJPEGConversion",image->filename); else { int point_transform, predictor; predictor=image->quality/100; /* range 1-7 */ point_transform=image->quality % 20; /* range 0-15 */ jpeg_simple_lossless(&jpeg_info,predictor,point_transform); if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Compression: lossless"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Predictor: %d",predictor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Point Transform: %d",point_transform); } } #endif } option=GetImageOption(image_info,"jpeg:extent"); if (option != (const char *) NULL) { Image *jpeg_image; ImageInfo *jpeg_info; jpeg_info=CloneImageInfo(image_info); jpeg_info->blob=NULL; jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image != (Image *) NULL) { MagickSizeType extent; size_t maximum, minimum; /* Search for compression quality that does not exceed image extent. */ jpeg_image->quality=0; extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0); (void) DeleteImageOption(jpeg_info,"jpeg:extent"); (void) DeleteImageArtifact(jpeg_image,"jpeg:extent"); maximum=image_info->quality; if (maximum < 2) maximum=101; for (minimum=2; minimum < maximum; ) { (void) AcquireUniqueFilename(jpeg_image->filename); jpeg_image->quality=minimum+(maximum-minimum+1)/2; (void) WriteJPEGImage(jpeg_info,jpeg_image); if (GetBlobSize(jpeg_image) <= extent) minimum=jpeg_image->quality+1; else maximum=jpeg_image->quality-1; (void) RelinquishUniqueFileResource(jpeg_image->filename); } quality=(int) minimum-1; jpeg_image=DestroyImage(jpeg_image); } jpeg_info=DestroyImageInfo(jpeg_info); } jpeg_set_quality(&jpeg_info,quality,TRUE); #if (JPEG_LIB_VERSION >= 70) option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) { GeometryInfo geometry_info; int flags; /* Set quality scaling for luminance and chrominance separately. 
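
    (Illustrative values, assuming the option is supplied as
    -define quality=90,70: ParseGeometry() below then yields rho=90 and
    sigma=70, so q_scale_factor[0] rescales the luminance table for
    quality 90 while q_scale_factor[1] rescales the chrominance table for
    quality 70.)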
*/ flags=ParseGeometry(option,&geometry_info); if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0)) { jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int) (geometry_info.rho+0.5)); jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int) (geometry_info.sigma+0.5)); jpeg_default_qtables(&jpeg_info,TRUE); } } #endif colorspace=jpeg_info.in_color_space; value=GetImageOption(image_info,"jpeg:colorspace"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:colorspace"); if (value != (char *) NULL) colorspace=StringToInteger(value); sampling_factor=(const char *) NULL; if (colorspace == jpeg_info.in_color_space) { value=GetImageOption(image_info,"jpeg:sampling-factor"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor == (const char *) NULL) { if (quality >= 90) for (i=0; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } else { char **factors; GeometryInfo geometry_info; MagickStatusType flags; /* Set sampling factor. */ i=0; factors=SamplingFactorToList(sampling_factor); if (factors != (char **) NULL) { for (i=0; i < MAX_COMPONENTS; i++) { if (factors[i] == (char *) NULL) break; flags=ParseGeometry(factors[i],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho; jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma; factors[i]=(char *) RelinquishMagickMemory(factors[i]); } factors=(char **) RelinquishMagickMemory(factors); } for ( ; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } option=GetImageOption(image_info,"jpeg:q-table"); if (option != (const char *) NULL) { QuantizationTable *table; /* Custom quantization tables. 
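
    An illustrative table file, inferred from what GetQuantizationTable()
    parses above (the root element name is arbitrary; only the <table>,
    <description> and <levels> names and the slot/alias, width, height and
    divisor attributes are significant; levels content is a comma-separated
    list, and when width*height is less than 64 the remaining entries are
    padded with the last parsed value).  The 8x8 values shown are the
    example luminance table from ITU-T T.81 Annex K:

      <quantization-tables>
        <table slot="0" alias="luma">
          <description>Sample luminance table</description>
          <levels width="8" height="8" divisor="1">
             16,  11,  10,  16,  24,  40,  51,  61,
             12,  12,  14,  19,  26,  58,  60,  55,
             14,  13,  16,  24,  40,  57,  69,  56,
             14,  17,  22,  29,  51,  87,  80,  62,
             18,  22,  37,  56,  68, 109, 103,  77,
             24,  35,  55,  64,  81, 104, 113,  92,
             49,  64,  78,  87, 103, 121, 120, 101,
             72,  92,  95,  98, 112, 100, 103,  99
          </levels>
        </table>
      </quantization-tables>

    Slots "1" through "3", looked up below, may each name a further table.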
*/ table=GetQuantizationTable(option,"0",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=0; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=0; jpeg_add_quant_table(&jpeg_info,0,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"1",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=1; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=1; jpeg_add_quant_table(&jpeg_info,1,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"2",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=2; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=2; jpeg_add_quant_table(&jpeg_info,2,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"3",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=3; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=3; jpeg_add_quant_table(&jpeg_info,3,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } } jpeg_start_compress(&jpeg_info,TRUE); if (image->debug != MagickFalse) { if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g", (double) image->depth); if (image->colors != 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: %.20g",(double) image->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: unspecified"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "JPEG data precision: %d",(int) jpeg_info.data_precision); switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); break; } default: break; } switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAY"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor); break; } case sRGBColorspace: case RGBColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image colorspace is RGB"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, 
jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", image->colorspace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } } } /* Write JPEG profiles. */ value=GetImageProperty(image,"comment"); if (value != (char *) NULL) for (i=0; i < (ssize_t) strlen(value); i+=65533L) jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i, (unsigned int) MagickMin((size_t) strlen(value+i),65533L)); if (image->profiles != (void *) NULL) WriteProfile(&jpeg_info,image); /* Convert MIFF to JPEG raster pixels. */ memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.input_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickFalse); } scanline[0]=(JSAMPROW) jpeg_pixels; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (scale == 0) scale=1; if (jpeg_info.data_precision <= 8) { if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p)); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum( GetPixelLuma(image,p))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; 
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelCyan(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelMagenta(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelYellow(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelBlack(indexes+x)))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum( GetPixelLuma(image,p)))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange- GetPixelIndex(indexes+x))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (y == (ssize_t) image->rows) jpeg_finish_compress(&jpeg_info); /* Relinquish resources. */ jpeg_destroy_compress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickTrue); } #endif
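
/*
  Illustrative note on sampling factors (a sketch, not authoritative): the
  "jpeg:sampling-factor" string parsed by SamplingFactorToList() above is a
  comma-separated list of per-component geometries, e.g.

      -define jpeg:sampling-factor=2x2,1x1,1x1

  which requests 4:2:0 chroma subsampling: comp_info[0] (luma) gets
  h_samp_factor=2 and v_samp_factor=2, while both chroma components keep
  1x1.  Components beyond those listed default to "1x1".
*/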
/* * src/interfaces/ecpg/pgtypeslib/timestamp.c */ #include "postgres_fe.h" #include <time.h> #include <float.h> #include <limits.h> #include <math.h> #ifdef __FAST_MATH__ #error -ffast-math is known to break this code #endif #include "extern.h" #include "dt.h" #include "pgtypes_timestamp.h" #include "pgtypes_date.h" #ifdef HAVE_INT64_TIMESTAMP static int64 time2t(const int hour, const int min, const int sec, const fsec_t fsec) { return (((((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec) * USECS_PER_SEC) + fsec; } /* time2t() */ #else static double time2t(const int hour, const int min, const int sec, const fsec_t fsec) { return (((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec + fsec; } /* time2t() */ #endif static timestamp dt2local(timestamp dt, int tz) { #ifdef HAVE_INT64_TIMESTAMP dt -= (tz * USECS_PER_SEC); #else dt -= tz; #endif return dt; } /* dt2local() */ /* tm2timestamp() * Convert a tm structure to a timestamp data type. * Note that year is _not_ 1900-based, but is an explicit full value. * Also, month is one-based, _not_ zero-based. * * Returns -1 on failure (overflow). */ int tm2timestamp(struct tm * tm, fsec_t fsec, int *tzp, timestamp * result) { #ifdef HAVE_INT64_TIMESTAMP int dDate; int64 time; #else double dDate, time; #endif /* Julian day routines are not correct for negative Julian days */ if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday)) return -1; dDate = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - date2j(2000, 1, 1); time = time2t(tm->tm_hour, tm->tm_min, tm->tm_sec, fsec); #ifdef HAVE_INT64_TIMESTAMP *result = (dDate * USECS_PER_DAY) + time; /* check for major overflow */ if ((*result - time) / USECS_PER_DAY != dDate) return -1; /* check for just-barely overflow (okay except time-of-day wraps) */ /* caution: we want to allow 1999-12-31 24:00:00 */ if ((*result < 0 && dDate > 0) || (*result > 0 && dDate < -1)) return -1; #else *result = dDate * SECS_PER_DAY + time; #endif if (tzp != NULL) *result = dt2local(*result, -(*tzp)); return 0; } /* tm2timestamp() */ static timestamp SetEpochTimestamp(void) { #ifdef HAVE_INT64_TIMESTAMP int64 noresult = 0; #else double noresult = 0.0; #endif timestamp dt; struct tm tt, *tm = &tt; if (GetEpochTime(tm) < 0) return noresult; tm2timestamp(tm, 0, NULL, &dt); return dt; } /* SetEpochTimestamp() */ /* timestamp2tm() * Convert timestamp data type to POSIX time structure. * Note that year is _not_ 1900-based, but is an explicit full value. * Also, month is one-based, _not_ zero-based. * Returns: * 0 on success * -1 on out of range * * For dates within the system-supported time_t range, convert to the * local time zone. If out of this range, leave as GMT. 
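 *
 * Worked example: with the J2000-based storage used throughout this file,
 * a timestamp of 0 decodes to 2000-01-01 00:00:00, since date0 below is
 * date2j(2000, 1, 1) and both the day quotient and the time-of-day
 * remainder produced by TMODULO() are then 0.
 *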
- tgl 97/05/27 */ static int timestamp2tm(timestamp dt, int *tzp, struct tm * tm, fsec_t *fsec, const char **tzn) { #ifdef HAVE_INT64_TIMESTAMP int64 dDate, date0; int64 time; #else double dDate, date0; double time; #endif #if defined(HAVE_TM_ZONE) || defined(HAVE_INT_TIMEZONE) time_t utime; struct tm *tx; #endif date0 = date2j(2000, 1, 1); #ifdef HAVE_INT64_TIMESTAMP time = dt; TMODULO(time, dDate, USECS_PER_DAY); if (time < INT64CONST(0)) { time += USECS_PER_DAY; dDate -= 1; } /* add offset to go from J2000 back to standard Julian date */ dDate += date0; /* Julian day routine does not work for negative Julian days */ if (dDate < 0 || dDate > (timestamp) INT_MAX) return -1; j2date((int) dDate, &tm->tm_year, &tm->tm_mon, &tm->tm_mday); dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec); #else time = dt; TMODULO(time, dDate, (double) SECS_PER_DAY); if (time < 0) { time += SECS_PER_DAY; dDate -= 1; } /* add offset to go from J2000 back to standard Julian date */ dDate += date0; recalc_d: /* Julian day routine does not work for negative Julian days */ if (dDate < 0 || dDate > (timestamp) INT_MAX) return -1; j2date((int) dDate, &tm->tm_year, &tm->tm_mon, &tm->tm_mday); recalc_t: dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec); *fsec = TSROUND(*fsec); /* roundoff may need to propagate to higher-order fields */ if (*fsec >= 1.0) { time = ceil(time); if (time >= (double) SECS_PER_DAY) { time = 0; dDate += 1; goto recalc_d; } goto recalc_t; } #endif if (tzp != NULL) { /* * Does this fall within the capabilities of the localtime() * interface? Then use this to rotate to the local time zone. */ if (IS_VALID_UTIME(tm->tm_year, tm->tm_mon, tm->tm_mday)) { #if defined(HAVE_TM_ZONE) || defined(HAVE_INT_TIMEZONE) #ifdef HAVE_INT64_TIMESTAMP utime = dt / USECS_PER_SEC + ((date0 - date2j(1970, 1, 1)) * INT64CONST(86400)); #else utime = dt + (date0 - date2j(1970, 1, 1)) * SECS_PER_DAY; #endif tx = localtime(&utime); tm->tm_year = tx->tm_year + 1900; tm->tm_mon = tx->tm_mon + 1; tm->tm_mday = tx->tm_mday; tm->tm_hour = tx->tm_hour; tm->tm_min = tx->tm_min; tm->tm_isdst = tx->tm_isdst; #if defined(HAVE_TM_ZONE) tm->tm_gmtoff = tx->tm_gmtoff; tm->tm_zone = tx->tm_zone; *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */ if (tzn != NULL) *tzn = tm->tm_zone; #elif defined(HAVE_INT_TIMEZONE) *tzp = (tm->tm_isdst > 0) ? TIMEZONE_GLOBAL - SECS_PER_HOUR : TIMEZONE_GLOBAL; if (tzn != NULL) *tzn = TZNAME_GLOBAL[(tm->tm_isdst > 0)]; #endif #else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */ *tzp = 0; /* Mark this as *no* time zone available */ tm->tm_isdst = -1; if (tzn != NULL) *tzn = NULL; #endif } else { *tzp = 0; /* Mark this as *no* time zone available */ tm->tm_isdst = -1; if (tzn != NULL) *tzn = NULL; } } else { tm->tm_isdst = -1; if (tzn != NULL) *tzn = NULL; } tm->tm_yday = dDate - date2j(tm->tm_year, 1, 1) + 1; return 0; } /* timestamp2tm() */ /* EncodeSpecialTimestamp() * * Convert reserved timestamp data type to string. 
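 *
 * Only the two reserved endpoints are handled: TIMESTAMP_NOBEGIN is
 * rendered as the EARLY string and TIMESTAMP_NOEND as the LATE string
 * (the dt.h spellings of -infinity and infinity); any finite value
 * returns FALSE so the caller formats it normally.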
* */ static int EncodeSpecialTimestamp(timestamp dt, char *str) { if (TIMESTAMP_IS_NOBEGIN(dt)) strcpy(str, EARLY); else if (TIMESTAMP_IS_NOEND(dt)) strcpy(str, LATE); else return FALSE; return TRUE; } /* EncodeSpecialTimestamp() */ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr) { timestamp result; #ifdef HAVE_INT64_TIMESTAMP int64 noresult = 0; #else double noresult = 0.0; #endif fsec_t fsec; struct tm tt, *tm = &tt; int dtype; int nf; char *field[MAXDATEFIELDS]; int ftype[MAXDATEFIELDS]; char lowstr[MAXDATELEN + MAXDATEFIELDS]; char *realptr; char **ptr = (endptr != NULL) ? endptr : &realptr; if (strlen(str) > MAXDATELEN) { errno = PGTYPES_TS_BAD_TIMESTAMP; return (noresult); } if (ParseDateTime(str, lowstr, field, ftype, &nf, ptr) != 0 || DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, 0) != 0) { errno = PGTYPES_TS_BAD_TIMESTAMP; return (noresult); } switch (dtype) { case DTK_DATE: if (tm2timestamp(tm, fsec, NULL, &result) != 0) { errno = PGTYPES_TS_BAD_TIMESTAMP; return (noresult); } break; case DTK_EPOCH: result = SetEpochTimestamp(); break; case DTK_LATE: TIMESTAMP_NOEND(result); break; case DTK_EARLY: TIMESTAMP_NOBEGIN(result); break; case DTK_INVALID: errno = PGTYPES_TS_BAD_TIMESTAMP; return (noresult); default: errno = PGTYPES_TS_BAD_TIMESTAMP; return (noresult); } /* AdjustTimestampForTypmod(&result, typmod); */ /* * Since it's difficult to test for noresult, make sure errno is 0 if no * error occurred. */ errno = 0; return result; } char * PGTYPEStimestamp_to_asc(timestamp tstamp) { struct tm tt, *tm = &tt; char buf[MAXDATELEN + 1]; fsec_t fsec; int DateStyle = 1; /* this defaults to ISO_DATES, shall we make * it an option? */ if (TIMESTAMP_NOT_FINITE(tstamp)) EncodeSpecialTimestamp(tstamp, buf); else if (timestamp2tm(tstamp, NULL, tm, &fsec, NULL) == 0) EncodeDateTime(tm, fsec, false, 0, NULL, DateStyle, buf, 0); else { errno = PGTYPES_TS_BAD_TIMESTAMP; return NULL; } return pgtypes_strdup(buf); } void PGTYPEStimestamp_current(timestamp * ts) { struct tm tm; GetCurrentDateTime(&tm); if (errno == 0) tm2timestamp(&tm, 0, NULL, ts); return; } static int dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm, char *output, int *pstr_len, const char *fmtstr) { union un_fmt_comb replace_val; int replace_type; int i; const char *p = fmtstr; char *q = output; while (*p) { if (*p == '%') { p++; /* fix compiler warning */ replace_type = PGTYPES_TYPE_NOTHING; switch (*p) { /* the abbreviated name of the day in the week */ /* XXX should be locale aware */ case 'a': replace_val.str_val = pgtypes_date_weekdays_short[dow]; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* the full name of the day in the week */ /* XXX should be locale aware */ case 'A': replace_val.str_val = days[dow]; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* the abbreviated name of the month */ /* XXX should be locale aware */ case 'b': case 'h': replace_val.str_val = months[tm->tm_mon]; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* the full name name of the month */ /* XXX should be locale aware */ case 'B': replace_val.str_val = pgtypes_date_months[tm->tm_mon]; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* * The preferred date and time representation for * the current locale. 
*/ case 'c': /* XXX */ break; /* the century number with leading zeroes */ case 'C': replace_val.uint_val = tm->tm_year / 100; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* day with leading zeroes (01 - 31) */ case 'd': replace_val.uint_val = tm->tm_mday; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* the date in the format mm/dd/yy */ case 'D': /* * ts, dDate, dow, tm is information about the timestamp * * q is the start of the current output buffer * * pstr_len is a pointer to the remaining size of output, * i.e. the size of q */ i = dttofmtasc_replace(ts, dDate, dow, tm, q, pstr_len, "%m/%d/%y"); if (i) return i; break; /* day with leading spaces (01 - 31) */ case 'e': replace_val.uint_val = tm->tm_mday; replace_type = PGTYPES_TYPE_UINT_2_LS; break; /* * alternative format modifier */ case 'E': { char tmp[4] = "%Ex"; p++; if (*p == '\0') return -1; tmp[2] = *p; /* * strftime's month is 0 based, ours is 1 based */ tm->tm_mon -= 1; i = strftime(q, *pstr_len, tmp, tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; } /* * The ISO 8601 year with century as a decimal number. The * 4-digit year corresponding to the ISO week number. */ case 'G': { /* Keep compiler quiet - Don't use a literal format */ const char *fmt = "%G"; tm->tm_mon -= 1; i = strftime(q, *pstr_len, fmt, tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; } break; /* * Like %G, but without century, i.e., with a 2-digit year * (00-99). */ case 'g': { const char *fmt = "%g"; /* Keep compiler quiet about * 2-digit year */ tm->tm_mon -= 1; i = strftime(q, *pstr_len, fmt, tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; } break; /* hour (24 hour clock) with leading zeroes */ case 'H': replace_val.uint_val = tm->tm_hour; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* hour (12 hour clock) with leading zeroes */ case 'I': replace_val.uint_val = tm->tm_hour % 12; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* * The day of the year as a decimal number with leading * zeroes. It ranges from 001 to 366. */ case 'j': replace_val.uint_val = tm->tm_yday; replace_type = PGTYPES_TYPE_UINT_3_LZ; break; /* * The hour (24 hour clock). Leading zeroes will be turned * into spaces. */ case 'k': replace_val.uint_val = tm->tm_hour; replace_type = PGTYPES_TYPE_UINT_2_LS; break; /* * The hour (12 hour clock). Leading zeroes will be turned * into spaces. 
*/ case 'l': replace_val.uint_val = tm->tm_hour % 12; replace_type = PGTYPES_TYPE_UINT_2_LS; break; /* The month as a decimal number with a leading zero */ case 'm': replace_val.uint_val = tm->tm_mon; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* The minute as a decimal number with a leading zero */ case 'M': replace_val.uint_val = tm->tm_min; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* A newline character */ case 'n': replace_val.char_val = '\n'; replace_type = PGTYPES_TYPE_CHAR; break; /* the AM/PM specifier (uppercase) */ /* XXX should be locale aware */ case 'p': if (tm->tm_hour < 12) replace_val.str_val = "AM"; else replace_val.str_val = "PM"; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* the AM/PM specifier (lowercase) */ /* XXX should be locale aware */ case 'P': if (tm->tm_hour < 12) replace_val.str_val = "am"; else replace_val.str_val = "pm"; replace_type = PGTYPES_TYPE_STRING_CONSTANT; break; /* the time in the format %I:%M:%S %p */ /* XXX should be locale aware */ case 'r': i = dttofmtasc_replace(ts, dDate, dow, tm, q, pstr_len, "%I:%M:%S %p"); if (i) return i; break; /* The time in 24 hour notation (%H:%M) */ case 'R': i = dttofmtasc_replace(ts, dDate, dow, tm, q, pstr_len, "%H:%M"); if (i) return i; break; /* The number of seconds since the Epoch (1970-01-01) */ case 's': #ifdef HAVE_INT64_TIMESTAMP replace_val.int64_val = (*ts - SetEpochTimestamp()) / 1000000.0; replace_type = PGTYPES_TYPE_INT64; #else replace_val.double_val = *ts - SetEpochTimestamp(); replace_type = PGTYPES_TYPE_DOUBLE_NF; #endif break; /* seconds as a decimal number with leading zeroes */ case 'S': replace_val.uint_val = tm->tm_sec; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* A tabulator */ case 't': replace_val.char_val = '\t'; replace_type = PGTYPES_TYPE_CHAR; break; /* The time in 24 hour notation (%H:%M:%S) */ case 'T': i = dttofmtasc_replace(ts, dDate, dow, tm, q, pstr_len, "%H:%M:%S"); if (i) return i; break; /* * The day of the week as a decimal, Monday = 1, Sunday = * 7 */ case 'u': replace_val.uint_val = dow; if (replace_val.uint_val == 0) replace_val.uint_val = 7; replace_type = PGTYPES_TYPE_UINT; break; /* The week number of the year as a decimal number */ case 'U': tm->tm_mon -= 1; i = strftime(q, *pstr_len, "%U", tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; /* * The ISO 8601:1988 week number of the current year as a * decimal number. */ case 'V': { /* Keep compiler quiet - Don't use a literal format */ const char *fmt = "%V"; i = strftime(q, *pstr_len, fmt, tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } replace_type = PGTYPES_TYPE_NOTHING; } break; /* * The day of the week as a decimal, Sunday being 0 and * Monday 1. */ case 'w': replace_val.uint_val = dow; replace_type = PGTYPES_TYPE_UINT; break; /* The week number of the year (another definition) */ case 'W': tm->tm_mon -= 1; i = strftime(q, *pstr_len, "%U", tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; /* * The preferred date representation for the current * locale without the time. */ case 'x': { const char *fmt = "%x"; /* Keep compiler quiet about * 2-digit year */ tm->tm_mon -= 1; i = strftime(q, *pstr_len, fmt, tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; } break; /* * The preferred time representation for the current * locale without the date. 
*/ case 'X': tm->tm_mon -= 1; i = strftime(q, *pstr_len, "%X", tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; /* The year without the century (2 digits, leading zeroes) */ case 'y': replace_val.uint_val = tm->tm_year % 100; replace_type = PGTYPES_TYPE_UINT_2_LZ; break; /* The year with the century (4 digits) */ case 'Y': replace_val.uint_val = tm->tm_year; replace_type = PGTYPES_TYPE_UINT; break; /* The time zone offset from GMT */ case 'z': tm->tm_mon -= 1; i = strftime(q, *pstr_len, "%z", tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; /* The name or abbreviation of the time zone */ case 'Z': tm->tm_mon -= 1; i = strftime(q, *pstr_len, "%Z", tm); if (i == 0) return -1; while (*q) { q++; (*pstr_len)--; } tm->tm_mon += 1; replace_type = PGTYPES_TYPE_NOTHING; break; /* A % sign */ case '%': replace_val.char_val = '%'; replace_type = PGTYPES_TYPE_CHAR; break; case '\0': /* fmtstr: foo%' - The string ends with a % sign */ /* * this is not compliant to the specification */ return -1; default: /* * if we don't know the pattern, we just copy it */ if (*pstr_len > 1) { *q = '%'; q++; (*pstr_len)--; if (*pstr_len > 1) { *q = *p; q++; (*pstr_len)--; } else { *q = '\0'; return -1; } *q = '\0'; } else return -1; break; } i = pgtypes_fmt_replace(replace_val, replace_type, &q, pstr_len); if (i) return i; } else { if (*pstr_len > 1) { *q = *p; (*pstr_len)--; q++; *q = '\0'; } else return -1; } p++; } return 0; } int PGTYPEStimestamp_fmt_asc(timestamp * ts, char *output, int str_len, const char *fmtstr) { struct tm tm; fsec_t fsec; date dDate; int dow; dDate = PGTYPESdate_from_timestamp(*ts); dow = PGTYPESdate_dayofweek(dDate); timestamp2tm(*ts, NULL, &tm, &fsec, NULL); return dttofmtasc_replace(ts, dDate, dow, &tm, output, &str_len, fmtstr); } int PGTYPEStimestamp_sub(timestamp * ts1, timestamp * ts2, interval * iv) { if (TIMESTAMP_NOT_FINITE(*ts1) || TIMESTAMP_NOT_FINITE(*ts2)) return PGTYPES_TS_ERR_EINFTIME; else iv->time = (*ts1 - *ts2); iv->month = 0; return 0; } int PGTYPEStimestamp_defmt_asc(char *str, const char *fmt, timestamp * d) { int year, month, day; int hour, minute, second; int tz; int i; char *mstr; char *mfmt; if (!fmt) fmt = "%Y-%m-%d %H:%M:%S"; if (!fmt[0]) return 1; mstr = pgtypes_strdup(str); mfmt = pgtypes_strdup(fmt); /* * initialize with impossible values so that we can see if the fields * where specified at all */ /* XXX ambiguity with 1 BC for year? */ year = -1; month = -1; day = -1; hour = 0; minute = -1; second = -1; tz = 0; i = PGTYPEStimestamp_defmt_scan(&mstr, mfmt, d, &year, &month, &day, &hour, &minute, &second, &tz); free(mstr); free(mfmt); return i; } /* * add an interval to a time stamp * * *tout = tin + span * * returns 0 if successful * returns -1 if it fails * */ int PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout) { if (TIMESTAMP_NOT_FINITE(*tin)) *tout = *tin; else { if (span->month != 0) { struct tm tt, *tm = &tt; fsec_t fsec; if (timestamp2tm(*tin, NULL, tm, &fsec, NULL) != 0) return -1; tm->tm_mon += span->month; if (tm->tm_mon > MONTHS_PER_YEAR) { tm->tm_year += (tm->tm_mon - 1) / MONTHS_PER_YEAR; tm->tm_mon = (tm->tm_mon - 1) % MONTHS_PER_YEAR + 1; } else if (tm->tm_mon < 1) { tm->tm_year += tm->tm_mon / MONTHS_PER_YEAR - 1; tm->tm_mon = tm->tm_mon % MONTHS_PER_YEAR + MONTHS_PER_YEAR; } /* adjust for end of month boundary problems... 
*/ if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1]) tm->tm_mday = (day_tab[isleap(tm->tm_year)][tm->tm_mon - 1]); if (tm2timestamp(tm, fsec, NULL, tin) != 0) return -1; } *tin += span->time; *tout = *tin; } return 0; } /* * subtract an interval from a time stamp * * *tout = tin - span * * returns 0 if successful * returns -1 if it fails * */ int PGTYPEStimestamp_sub_interval(timestamp * tin, interval * span, timestamp * tout) { interval tspan; tspan.month = -span->month; tspan.time = -span->time; return PGTYPEStimestamp_add_interval(tin, &tspan, tout); }
./CrossVul/dataset_final_sorted/CWE-119/c/good_2019_5
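Note on the pgtypeslib record above: every formatting branch of dttofmtasc_replace() advances the output pointer q and decrements the shared remaining-length counter *pstr_len in lockstep, and returns -1 the moment a token would no longer fit. A minimal standalone sketch of that bounded-append contract, with hypothetical names that are not part of the ecpg API:

#include <stdio.h>
#include <string.h>

/*
 * Sketch of the pattern used by dttofmtasc_replace(): copy src into *q
 * only when it fits in the *remaining bytes still free (keeping room
 * for the terminating NUL), and keep pointer and budget in sync.
 * Returns 0 on success, -1 when the output buffer is exhausted.
 */
static int bounded_append(char **q, int *remaining, const char *src)
{
    size_t len = strlen(src);

    if (*remaining <= 0 || (size_t) *remaining <= len)
        return -1;
    memcpy(*q, src, len);
    *q += len;
    *remaining -= (int) len;
    **q = '\0';
    return 0;
}

int main(void)
{
    char buf[8];
    char *q = buf;
    int remaining = sizeof(buf);

    printf("%d\n", bounded_append(&q, &remaining, "12:34")); /* 0 */
    printf("%d\n", bounded_append(&q, &remaining, ":56"));   /* -1: only 2 bytes free */
    printf("%s\n", buf);                                     /* 12:34 */
    return 0;
}

PGTYPEStimestamp_fmt_asc() passes str_len by address for exactly this reason: recursive cases such as %D and %T draw on the same budget as literal characters.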
crossvul-cpp_data_good_3414_0
/* * XWD image format * * Copyright (c) 2012 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" #include "internal.h" #include "xwd.h" static int xwd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; const uint8_t *buf = avpkt->data; int i, ret, buf_size = avpkt->size; uint32_t version, header_size, vclass, ncolors; uint32_t xoffset, be, bpp, lsize, rsize; uint32_t pixformat, pixdepth, bunit, bitorder, bpad; uint32_t rgb[3]; uint8_t *ptr; GetByteContext gb; if (buf_size < XWD_HEADER_SIZE) return AVERROR_INVALIDDATA; bytestream2_init(&gb, buf, buf_size); header_size = bytestream2_get_be32u(&gb); version = bytestream2_get_be32u(&gb); if (version != XWD_VERSION) { av_log(avctx, AV_LOG_ERROR, "unsupported version\n"); return AVERROR_INVALIDDATA; } if (buf_size < header_size || header_size < XWD_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "invalid header size\n"); return AVERROR_INVALIDDATA; } pixformat = bytestream2_get_be32u(&gb); pixdepth = bytestream2_get_be32u(&gb); avctx->width = bytestream2_get_be32u(&gb); avctx->height = bytestream2_get_be32u(&gb); xoffset = bytestream2_get_be32u(&gb); be = bytestream2_get_be32u(&gb); bunit = bytestream2_get_be32u(&gb); bitorder = bytestream2_get_be32u(&gb); bpad = bytestream2_get_be32u(&gb); bpp = bytestream2_get_be32u(&gb); lsize = bytestream2_get_be32u(&gb); vclass = bytestream2_get_be32u(&gb); rgb[0] = bytestream2_get_be32u(&gb); rgb[1] = bytestream2_get_be32u(&gb); rgb[2] = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, 8); ncolors = bytestream2_get_be32u(&gb); bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20)); av_log(avctx, AV_LOG_DEBUG, "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n", pixformat, pixdepth, bunit, bitorder, bpad); av_log(avctx, AV_LOG_DEBUG, "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n", vclass, ncolors, bpp, be, lsize, xoffset); av_log(avctx, AV_LOG_DEBUG, "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n", rgb[0], rgb[1], rgb[2]); if (pixformat > XWD_Z_PIXMAP) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n"); return AVERROR_INVALIDDATA; } if (pixdepth == 0 || pixdepth > 32) { av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n"); return AVERROR_INVALIDDATA; } if (xoffset) { avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset); return AVERROR_PATCHWELCOME; } if (be > 1) { av_log(avctx, AV_LOG_ERROR, "invalid byte order\n"); return AVERROR_INVALIDDATA; } if (bitorder > 1) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n"); return AVERROR_INVALIDDATA; } if (bunit != 8 && bunit != 16 && bunit != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap 
unit\n"); return AVERROR_INVALIDDATA; } if (bpad != 8 && bpad != 16 && bpad != 32) { av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n"); return AVERROR_INVALIDDATA; } if (bpp == 0 || bpp > 32) { av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n"); return AVERROR_INVALIDDATA; } if (ncolors > 256) { av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n"); return AVERROR_INVALIDDATA; } if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0) return ret; rsize = FFALIGN(avctx->width * bpp, bpad) / 8; if (lsize < rsize) { av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) { av_log(avctx, AV_LOG_ERROR, "input buffer too small\n"); return AVERROR_INVALIDDATA; } if (pixformat != XWD_Z_PIXMAP) { avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, pixformat); return AVERROR_PATCHWELCOME; } avctx->pix_fmt = AV_PIX_FMT_NONE; switch (vclass) { case XWD_STATIC_GRAY: case XWD_GRAY_SCALE: if (bpp != 1 && bpp != 8) return AVERROR_INVALIDDATA; if (bpp == 1 && pixdepth == 1) { avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; } else if (bpp == 8 && pixdepth == 8) { avctx->pix_fmt = AV_PIX_FMT_GRAY8; } break; case XWD_STATIC_COLOR: case XWD_PSEUDO_COLOR: if (bpp == 8) avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case XWD_TRUE_COLOR: case XWD_DIRECT_COLOR: if (bpp != 16 && bpp != 24 && bpp != 32) return AVERROR_INVALIDDATA; if (bpp == 16 && pixdepth == 15) { if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE; else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00) avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE; } else if (bpp == 16 && pixdepth == 16) { if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F) avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE; else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800) avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE; } else if (bpp == 24) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24; } else if (bpp == 32) { if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF) avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA; else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000) avctx->pix_fmt = be ? 
AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA; } bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "invalid visual class\n"); return AVERROR_INVALIDDATA; } if (avctx->pix_fmt == AV_PIX_FMT_NONE) { avpriv_request_sample(avctx, "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"", bpp, pixdepth, vclass); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint32_t *dst = (uint32_t *)p->data[1]; uint8_t red, green, blue; for (i = 0; i < ncolors; i++) { bytestream2_skipu(&gb, 4); // skip colormap entry number red = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); green = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 1); blue = bytestream2_get_byteu(&gb); bytestream2_skipu(&gb, 3); // skip bitmask flag and padding dst[i] = red << 16 | green << 8 | blue; } } ptr = p->data[0]; for (i = 0; i < avctx->height; i++) { bytestream2_get_bufferu(&gb, ptr, rsize); bytestream2_skipu(&gb, lsize - rsize); ptr += p->linesize[0]; } *got_frame = 1; return buf_size; } AVCodec ff_xwd_decoder = { .name = "xwd", .long_name = NULL_IF_CONFIG_SMALL("XWD (X Window Dump) image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_XWD, .decode = xwd_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
./CrossVul/dataset_final_sorted/CWE-119/c/good_3414_0
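Note on the xwd.c record above: the decoder computes the true per-row byte count as rsize = FFALIGN(avctx->width * bpp, bpad) / 8, insists that the header-declared lsize is at least that, and widens height * lsize to 64 bits before comparing it with the bytes left in the packet, so a hostile header cannot make the product wrap. A self-contained sketch of that guard; the 12-byte colormap entry mirrors XWD_CMAP_SIZE, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define CMAP_ENTRY 12 /* same value as FFmpeg's XWD_CMAP_SIZE */

/*
 * Illustrative version of the size check in xwd_decode_frame():
 * accept the frame only if `avail` input bytes cover the colormap
 * plus `height` scan lines of `lsize` bytes. The uint64_t widening
 * is the point -- a 32-bit product could silently wrap and let an
 * undersized packet through.
 */
static int check_input(uint32_t avail, uint32_t ncolors,
                       uint32_t height, uint32_t lsize)
{
    uint64_t need = (uint64_t) ncolors * CMAP_ENTRY +
                    (uint64_t) height * lsize;

    return (uint64_t) avail >= need ? 0 : -1;
}

int main(void)
{
    /* 0x10000 * 0x10000 wraps to 0 in 32-bit arithmetic; the 64-bit
     * comparison still rejects the packet. */
    printf("%d\n", check_input(1024, 0, 0x10000, 0x10000)); /* -1 */
    printf("%d\n", check_input(1024, 2, 10, 100));          /*  0 */
    return 0;
}

Note that pixdepth, bpp, ncolors and the image dimensions are all range-checked before this comparison runs, so every quantity feeding the arithmetic is already bounded.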
crossvul-cpp_data_good_346_3
/* * card-tcos.c: Support for TCOS cards * * Copyright (C) 2011 Peter Koch <pk@opensc-project.org> * Copyright (C) 2002 g10 Code GmbH * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include <ctype.h> #include <time.h> #include <stdlib.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static struct sc_atr_table tcos_atrs[] = { /* Infineon SLE44 */ { "3B:BA:13:00:81:31:86:5D:00:64:05:0A:02:01:31:80:90:00:8B", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66S */ { "3B:BA:14:00:81:31:86:5D:00:64:05:14:02:02:31:80:90:00:91", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX320P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:60:02:03:31:80:90:00:66", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX322P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:7B:02:03:31:80:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Philips P5CT072 */ { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:03:01:31:C0:73:F7:01:D0:00:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:04:0F:31:C0:73:F7:01:D0:00:90:00:74", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, /* Philips P5CT080 */ { "3B:BF:B6:00:81:31:FE:5D:00:64:04:28:03:02:31:C0:73:F7:01:D0:00:90:00:67", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; static struct sc_card_operations tcos_ops; static struct sc_card_driver tcos_drv = { "TCOS 3.0", "tcos", &tcos_ops, NULL, 0, NULL }; static const struct sc_card_operations *iso_ops = NULL; typedef struct tcos_data_st { unsigned int pad_flags; unsigned int next_sign; } tcos_data; static int tcos_finish(sc_card_t *card) { free(card->drv_data); return 0; } static int tcos_match_card(sc_card_t *card) { int i; i = _sc_match_atr(card, tcos_atrs, &card->type); if (i < 0) return 0; return 1; } static int tcos_init(sc_card_t *card) { unsigned long flags; tcos_data *data = malloc(sizeof(tcos_data)); if (!data) return SC_ERROR_OUT_OF_MEMORY; card->name = "TCOS"; card->drv_data = (void *)data; card->cla = 0x00; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_PAD_PKCS1; flags |= SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); if (card->type == SC_CARD_TYPE_TCOS_V3) { card->caps |= SC_CARD_CAP_APDU_EXT; _sc_card_add_rsa_alg(card, 1280, flags, 0); _sc_card_add_rsa_alg(card, 1536, flags, 0); _sc_card_add_rsa_alg(card, 1792, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return 0; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. 
*/ static int tcos_construct_fci(const sc_file_t *file, u8 *out, size_t *outlen) { u8 *p = out; u8 buf[64]; size_t n; /* FIXME: possible buffer overflow */ *p++ = 0x6F; /* FCI */ p++; /* File size */ buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x81, buf, 2, p, 16, &p); /* File descriptor */ n = 0; buf[n] = file->shareable ? 0x40 : 0; switch (file->type) { case SC_FILE_TYPE_WORKING_EF: break; case SC_FILE_TYPE_DF: buf[0] |= 0x38; break; default: return SC_ERROR_NOT_SUPPORTED; } buf[n++] |= file->ef_structure & 7; if ( (file->ef_structure & 7) > 1) { /* record structured file */ buf[n++] = 0x41; /* indicate 3rd byte */ buf[n++] = file->record_length; } sc_asn1_put_tag(0x82, buf, n, p, 8, &p); /* File identifier */ buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, 16, &p); /* Directory name */ if (file->type == SC_FILE_TYPE_DF) { if (file->namelen) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, 16, &p); } else { /* TCOS needs one, so we use a faked one */ snprintf ((char *) buf, sizeof(buf)-1, "foo-%lu", (unsigned long) time (NULL)); sc_asn1_put_tag(0x84, buf, strlen ((char *) buf), p, 16, &p); } } /* File descriptor extension */ if (file->prop_attr_len && file->prop_attr) { n = file->prop_attr_len; memcpy(buf, file->prop_attr, n); } else { n = 0; buf[n++] = 0x01; /* not invalidated, permanent */ if (file->type == SC_FILE_TYPE_WORKING_EF) buf[n++] = 0x00; /* generic data file */ } sc_asn1_put_tag(0x85, buf, n, p, 16, &p); /* Security attributes */ if (file->sec_attr_len && file->sec_attr) { memcpy(buf, file->sec_attr, file->sec_attr_len); n = file->sec_attr_len; } else { /* no attributes given - fall back to default one */ memcpy (buf+ 0, "\xa4\x00\x00\x00\xff\xff", 6); /* select */ memcpy (buf+ 6, "\xb0\x00\x00\x00\xff\xff", 6); /* read bin */ memcpy (buf+12, "\xd6\x00\x00\x00\xff\xff", 6); /* upd bin */ memcpy (buf+18, "\x60\x00\x00\x00\xff\xff", 6); /* admin grp*/ n = 24; } sc_asn1_put_tag(0x86, buf, n, p, sizeof (buf), &p); /* fixup length of FCI */ out[1] = p - out - 2; *outlen = p - out; return 0; } static int tcos_create_file(sc_card_t *card, sc_file_t *file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_apdu_t apdu; len = SC_MAX_APDU_BUFFER_SIZE; r = tcos_construct_fci(file, sbuf, &len); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "tcos_construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.cla |= 0x80; /* this is an proprietary extension */ apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static unsigned int map_operations (int commandbyte ) { unsigned int op = (unsigned int)-1; switch ( (commandbyte & 0xfe) ) { case 0xe2: /* append record */ op = SC_AC_OP_UPDATE; break; case 0x24: /* change password */ op = SC_AC_OP_UPDATE; break; case 0xe0: /* create */ op = SC_AC_OP_CREATE; break; case 0xe4: /* delete */ op = SC_AC_OP_DELETE; break; case 0xe8: /* exclude sfi */ op = SC_AC_OP_WRITE; break; case 0x82: /* external auth */ op = SC_AC_OP_READ; break; case 0xe6: /* include sfi */ op = SC_AC_OP_WRITE; break; case 0x88: /* internal auth */ op = SC_AC_OP_READ; break; case 0x04: /* invalidate */ op = SC_AC_OP_INVALIDATE; break; case 0x2a: /* perform sec. 
op */ op = SC_AC_OP_SELECT; break; case 0xb0: /* read binary */ op = SC_AC_OP_READ; break; case 0xb2: /* read record */ op = SC_AC_OP_READ; break; case 0x44: /* rehabilitate */ op = SC_AC_OP_REHABILITATE; break; case 0xa4: /* select */ op = SC_AC_OP_SELECT; break; case 0xee: /* set permanent */ op = SC_AC_OP_CREATE; break; case 0x2c: /* unblock password */op = SC_AC_OP_WRITE; break; case 0xd6: /* update binary */ op = SC_AC_OP_WRITE; break; case 0xdc: /* update record */ op = SC_AC_OP_WRITE; break; case 0x20: /* verify password */ op = SC_AC_OP_SELECT; break; case 0x60: /* admin group */ op = SC_AC_OP_CREATE; break; } return op; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. */ static void parse_sec_attr(sc_card_t *card, sc_file_t *file, const u8 *buf, size_t len) { unsigned int op; /* list directory is not covered by ACLs - so always add an entry */ sc_file_add_acl_entry (file, SC_AC_OP_LIST_FILES, SC_AC_NONE, SC_AC_KEY_REF_NONE); /* FIXME: check for what LOCK is used */ sc_file_add_acl_entry (file, SC_AC_OP_LOCK, SC_AC_NONE, SC_AC_KEY_REF_NONE); for (; len >= 6; len -= 6, buf += 6) { /* FIXME: temporary hacks */ if (!memcmp(buf, "\xa4\x00\x00\x00\xff\xff", 6)) /* select */ sc_file_add_acl_entry (file, SC_AC_OP_SELECT, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\xb0\x00\x00\x00\xff\xff", 6)) /*read*/ sc_file_add_acl_entry (file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\xd6\x00\x00\x00\xff\xff", 6)) /*upd*/ sc_file_add_acl_entry (file, SC_AC_OP_UPDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\x60\x00\x00\x00\xff\xff", 6)) {/*adm */ sc_file_add_acl_entry (file, SC_AC_OP_WRITE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_CREATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_INVALIDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_REHABILITATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else { /* the first byte tells us the command or the command group.
We have to mask bit 0 because this one distinguish between AND/OR combination of PINs*/ op = map_operations (buf[0]); if (op == (unsigned int)-1) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unknown security command byte %02x\n", buf[0]); continue; } if (!buf[1]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_CHV, buf[1]); if (!buf[2] && !buf[3]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_TERM, (buf[2]<<8)|buf[3]); } } } static int tcos_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out) { sc_context_t *ctx; sc_apdu_t apdu; sc_file_t *file=NULL; u8 buf[SC_MAX_APDU_BUFFER_SIZE], pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; unsigned int i; int r, pathlen; assert(card != NULL && in_path != NULL); ctx=card->ctx; memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0, 0x04); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; /* fall through */ case SC_PATH_TYPE_FROM_CURRENT: apdu.p1 = 9; break; case SC_PATH_TYPE_DF_NAME: apdu.p1 = 4; break; case SC_PATH_TYPE_PATH: apdu.p1 = 8; if (pathlen >= 2 && memcmp(path, "\x3F\x00", 2) == 0) path += 2, pathlen -= 2; if (pathlen == 0) apdu.p1 = 0; break; case SC_PATH_TYPE_PARENT: apdu.p1 = 3; pathlen = 0; break; default: SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if( pathlen == 0 ) apdu.cse = SC_APDU_CASE_2_SHORT; apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 256; } else { apdu.resplen = 0; apdu.le = 0; apdu.p2 = 0x0C; apdu.cse = (pathlen == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r || file_out == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, r); if (apdu.resplen < 1 || apdu.resp[0] != 0x62){ sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "received invalid template %02X\n", apdu.resp[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } file = sc_file_new(); if (file == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); *file_out = file; file->path = *in_path; for(i=2; i+1<apdu.resplen && i+1+apdu.resp[i+1]<apdu.resplen; i+=2+apdu.resp[i+1]){ size_t j, len=apdu.resp[i+1]; unsigned char type=apdu.resp[i], *d=apdu.resp+i+2; switch (type) { case 0x80: case 0x81: file->size=0; for(j=0; j<len; ++j) file->size = (file->size<<8) | d[j]; break; case 0x82: file->shareable = (d[0] & 0x40) ? 
1 : 0; file->ef_structure = d[0] & 7; switch ((d[0]>>3) & 7) { case 0: file->type = SC_FILE_TYPE_WORKING_EF; break; case 7: file->type = SC_FILE_TYPE_DF; break; default: sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "invalid file type %02X in file descriptor\n", d[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } break; case 0x83: file->id = (d[0]<<8) | d[1]; break; case 0x84: file->namelen = MIN(sizeof file->name, len); memcpy(file->name, d, file->namelen); break; case 0x86: sc_file_set_sec_attr(file, d, len); break; default: if (len>0) sc_file_set_prop_attr(file, d, len); } } file->magic = SC_FILE_MAGIC; parse_sec_attr(card, file, file->sec_attr, file->sec_attr_len); return 0; } static int tcos_list_files(sc_card_t *card, u8 *buf, size_t buflen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE], p1; int r, count = 0; assert(card != NULL); ctx = card->ctx; for (p1=1; p1<=2; p1++) { sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xAA, p1, 0); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (apdu.sw1==0x6A && (apdu.sw2==0x82 || apdu.sw2==0x88)) continue; r = sc_check_sw(card, apdu.sw1, apdu.sw2); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "List Dir failed"); if (apdu.resplen > buflen) return SC_ERROR_BUFFER_TOO_SMALL; sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "got %"SC_FORMAT_LEN_SIZE_T"u %s-FileIDs\n", apdu.resplen / 2, p1 == 1 ? "DF" : "EF"); memcpy(buf, apdu.resp, apdu.resplen); buf += apdu.resplen; buflen -= apdu.resplen; count += apdu.resplen; } return count; } static int tcos_delete_file(sc_card_t *card, const sc_path_t *path) { int r; u8 sbuf[2]; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (path->type != SC_PATH_TYPE_FILE_ID && path->len != 2) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "File type has to be SC_PATH_TYPE_FILE_ID\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } sbuf[0] = path->value[0]; sbuf[1] = path->value[1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { sc_context_t *ctx; sc_apdu_t apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE], *p; int r, default_key, tcos3; tcos_data *data; assert(card != NULL && env != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; if (se_num || (env->operation!=SC_SEC_OPERATION_DECIPHER && env->operation!=SC_SEC_OPERATION_SIGN)){ SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } if(!(env->flags & SC_SEC_ENV_KEY_REF_PRESENT)) sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "No Key-Reference in SecEnvironment\n"); else sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "Key-Reference %02X (len=%"SC_FORMAT_LEN_SIZE_T"u)\n", env->key_ref[0], env->key_ref_len); /* Key-Reference 0x80 ?? */ default_key= !(env->flags & SC_SEC_ENV_KEY_REF_PRESENT) || (env->key_ref_len==1 && env->key_ref[0]==0x80); sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "TCOS3:%d PKCS1:%d\n", tcos3, !!(env->algorithm_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); data->pad_flags = env->algorithm_flags; data->next_sign = default_key; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, tcos3 ? 
0x41 : 0xC1, 0xB8); p = sbuf; *p++=0x80; *p++=0x01; *p++=tcos3 ? 0x0A : 0x10; if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { *p++ = (env->flags & SC_SEC_ENV_KEY_REF_SYMMETRIC) ? 0x83 : 0x84; *p++ = env->key_ref_len; memcpy(p, env->key_ref, env->key_ref_len); p += env->key_ref_len; } apdu.data = sbuf; apdu.lc = apdu.datalen = (p - sbuf); r=sc_transmit_apdu(card, &apdu); if (r) { sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "%s: APDU transmit failed", sc_strerror(r)); return r; } if (apdu.sw1==0x6A && (apdu.sw2==0x81 || apdu.sw2==0x88)) { sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "Detected Signature-Only key\n"); if (env->operation==SC_SEC_OPERATION_SIGN && default_key) return SC_SUCCESS; } SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_restore_security_env(sc_card_t *card, int se_num) { return 0; } static int tcos_compute_signature(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { size_t i, dlen=datalen; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; int tcos3, r; assert(card != NULL && data != NULL && out != NULL); tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); if (datalen > 255) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); if(((tcos_data *)card->drv_data)->next_sign){ if(datalen>48){ sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Data to be signed is too long (TCOS supports max. 48 bytes)\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A, 0x9E, 0x9A); memcpy(sbuf, data, datalen); dlen=datalen; } else { int keylen= tcos3 ? 256 : 128; sc_format_apdu(card, &apdu, keylen>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = tcos3 ? 256 : 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (tcos3 && apdu.p1==0x80 && apdu.sw1==0x6A && apdu.sw2==0x87) { int keylen=128; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); } if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len = apdu.resplen>outlen ? 
outlen : apdu.resplen; memcpy(out, apdu.resp, len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_decipher(sc_card_t *card, const u8 * crgram, size_t crgram_len, u8 * out, size_t outlen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; tcos_data *data; int tcos3, r; assert(card != NULL && crgram != NULL && out != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; SC_FUNC_CALLED(ctx, SC_LOG_DEBUG_NORMAL); sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "TCOS3:%d PKCS1:%d\n",tcos3, !!(data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); sc_format_apdu(card, &apdu, crgram_len>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = crgram_len; apdu.data = sbuf; apdu.lc = apdu.datalen = crgram_len+1; sbuf[0] = tcos3 ? 0x00 : ((data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) ? 0x81 : 0x02); memcpy(sbuf+1, crgram, crgram_len); r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len= (apdu.resplen>outlen) ? outlen : apdu.resplen; unsigned int offset=0; if(tcos3 && (data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) && apdu.resp[0]==0 && apdu.resp[1]==2){ offset=2; while(offset<len && apdu.resp[offset]!=0) ++offset; offset=(offset<len-1) ? offset+1 : 0; } memcpy(out, apdu.resp+offset, len-offset); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len-offset); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } /* Issue the SET PERMANENT command. With ENABLE_NULLPIN set the NullPIN method will be activated, otherwise the permanent operation will be done on the active file. 
*/ static int tcos_setperm(sc_card_t *card, int enable_nullpin) { int r; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xEE, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 0; apdu.datalen = 0; apdu.data = NULL; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_get_serialnr(sc_card_t *card, sc_serial_number_t *serial) { int r; if (!serial) return SC_ERROR_INVALID_ARGUMENTS; /* see if we have cached serial number */ if (card->serialnr.len) { memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } card->serialnr.len = sizeof card->serialnr.value; r = sc_parse_ef_gdo(card, card->serialnr.value, &card->serialnr.len, NULL, 0); if (r < 0) { card->serialnr.len = 0; return r; } /* copy and return serial number */ memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } static int tcos_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr) { switch (cmd) { case SC_CARDCTL_TCOS_SETPERM: return tcos_setperm(card, !!ptr); case SC_CARDCTL_GET_SERIALNR: return tcos_get_serialnr(card, (sc_serial_number_t *)ptr); } return SC_ERROR_NOT_SUPPORTED; } struct sc_card_driver * sc_get_tcos_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; tcos_ops = *iso_drv->ops; tcos_ops.match_card = tcos_match_card; tcos_ops.init = tcos_init; tcos_ops.finish = tcos_finish; tcos_ops.create_file = tcos_create_file; tcos_ops.set_security_env = tcos_set_security_env; tcos_ops.select_file = tcos_select_file; tcos_ops.list_files = tcos_list_files; tcos_ops.delete_file = tcos_delete_file; tcos_ops.compute_signature = tcos_compute_signature; tcos_ops.decipher = tcos_decipher; tcos_ops.restore_security_env = tcos_restore_security_env; tcos_ops.card_ctl = tcos_card_ctl; return &tcos_drv; }
./CrossVul/dataset_final_sorted/CWE-119/c/good_346_3
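Note on the card-tcos.c record above: tcos_construct_fci() assembles the FCI as a sequence of tag/length/value triples through sc_asn1_put_tag(), handing each call the space left in the output buffer -- the in-code "FIXME: possible buffer overflow" is about keeping those limits honest. Below is a hedged sketch of single-byte-length TLV encoding with an explicit bounds check; put_tlv is a hypothetical stand-in, not the real sc_asn1_put_tag() interface:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Write a one-byte tag, a one-byte length (len must stay below 128)
 * and the value into out, advancing *p. Returns 0 on success, -1 if
 * the triple would not fit in the remaining buffer space.
 */
static int put_tlv(unsigned char tag, const unsigned char *val, size_t len,
                   unsigned char *out, size_t outlen, unsigned char **p)
{
    size_t used = (size_t) (*p - out);

    if (len >= 128 || outlen - used < 2 + len)
        return -1;
    (*p)[0] = tag;
    (*p)[1] = (unsigned char) len;
    memcpy(*p + 2, val, len);
    *p += 2 + len;
    return 0;
}

int main(void)
{
    unsigned char fci[32], *p = fci;
    unsigned char fid[2] = { 0x2F, 0x00 }; /* file identifier, tag 0x83 */

    if (put_tlv(0x83, fid, sizeof fid, fci, sizeof fci, &p))
        return 1;
    printf("%zu bytes: %02X %02X %02X %02X\n",
           (size_t) (p - fci), fci[0], fci[1], fci[2], fci[3]);
    /* prints: 4 bytes: 83 02 2F 00 */
    return 0;
}

With every emitter checked like this, an FCI that would exceed the caller's buffer fails cleanly instead of writing past it.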
crossvul-cpp_data_bad_2288_0
#include <trace/syscall.h> #include <trace/events/syscalls.h> #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */ #include <linux/ftrace.h> #include <linux/perf_event.h> #include <asm/syscall.h> #include "trace_output.h" #include "trace.h" static DEFINE_MUTEX(syscall_trace_lock); static int syscall_enter_register(struct ftrace_event_call *event, enum trace_reg type, void *data); static int syscall_exit_register(struct ftrace_event_call *event, enum trace_reg type, void *data); static struct list_head * syscall_get_enter_fields(struct ftrace_event_call *call) { struct syscall_metadata *entry = call->data; return &entry->enter_fields; } extern struct syscall_metadata *__start_syscalls_metadata[]; extern struct syscall_metadata *__stop_syscalls_metadata[]; static struct syscall_metadata **syscalls_metadata; #ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { /* * Only compare after the "sys" prefix. Archs that use * syscall wrappers may have syscalls symbols aliases prefixed * with ".SyS" or ".sys" instead of "sys", leading to an unwanted * mismatch. */ return !strcmp(sym + 3, name + 3); } #endif #ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS /* * Some architectures that allow for 32bit applications * to run on a 64bit kernel, do not map the syscalls for * the 32bit tasks the same as they do for 64bit tasks. * * *cough*x86*cough* * * In such a case, instead of reporting the wrong syscalls, * simply ignore them. * * For an arch to ignore the compat syscalls it needs to * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as * define the function arch_trace_is_compat_syscall() to let * the tracing system know that it should ignore it. 
*/ static int trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) { if (unlikely(arch_trace_is_compat_syscall(regs))) return -1; return syscall_get_nr(task, regs); } #else static inline int trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) { return syscall_get_nr(task, regs); } #endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */ static __init struct syscall_metadata * find_syscall_meta(unsigned long syscall) { struct syscall_metadata **start; struct syscall_metadata **stop; char str[KSYM_SYMBOL_LEN]; start = __start_syscalls_metadata; stop = __stop_syscalls_metadata; kallsyms_lookup(syscall, NULL, NULL, NULL, str); if (arch_syscall_match_sym_name(str, "sys_ni_syscall")) return NULL; for ( ; start < stop; start++) { if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name)) return *start; } return NULL; } static struct syscall_metadata *syscall_nr_to_meta(int nr) { if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) return NULL; return syscalls_metadata[nr]; } static enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags, struct trace_event *event) { struct trace_seq *s = &iter->seq; struct trace_entry *ent = iter->ent; struct syscall_trace_enter *trace; struct syscall_metadata *entry; int i, ret, syscall; trace = (typeof(trace))ent; syscall = trace->nr; entry = syscall_nr_to_meta(syscall); if (!entry) goto end; if (entry->enter_event->event.type != ent->type) { WARN_ON_ONCE(1); goto end; } ret = trace_seq_printf(s, "%s(", entry->name); if (!ret) return TRACE_TYPE_PARTIAL_LINE; for (i = 0; i < entry->nb_args; i++) { /* parameter types */ if (trace_flags & TRACE_ITER_VERBOSE) { ret = trace_seq_printf(s, "%s ", entry->types[i]); if (!ret) return TRACE_TYPE_PARTIAL_LINE; } /* parameter values */ ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i], trace->args[i], i == entry->nb_args - 1 ? "" : ", "); if (!ret) return TRACE_TYPE_PARTIAL_LINE; } ret = trace_seq_putc(s, ')'); if (!ret) return TRACE_TYPE_PARTIAL_LINE; end: ret = trace_seq_putc(s, '\n'); if (!ret) return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_HANDLED; } static enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags, struct trace_event *event) { struct trace_seq *s = &iter->seq; struct trace_entry *ent = iter->ent; struct syscall_trace_exit *trace; int syscall; struct syscall_metadata *entry; int ret; trace = (typeof(trace))ent; syscall = trace->nr; entry = syscall_nr_to_meta(syscall); if (!entry) { trace_seq_putc(s, '\n'); return TRACE_TYPE_HANDLED; } if (entry->exit_event->event.type != ent->type) { WARN_ON_ONCE(1); return TRACE_TYPE_UNHANDLED; } ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, trace->ret); if (!ret) return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_HANDLED; } extern char *__bad_type_size(void); #define SYSCALL_FIELD(type, name) \ sizeof(type) != sizeof(trace.name) ? \ __bad_type_size() : \ #type, #name, offsetof(typeof(trace), name), \ sizeof(trace.name), is_signed_type(type) static int __init __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) { int i; int pos = 0; /* When len=0, we just calculate the needed length */ #define LEN_OR_ZERO (len ? len - pos : 0) pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); for (i = 0; i < entry->nb_args; i++) { pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s", entry->args[i], sizeof(unsigned long), i == entry->nb_args - 1 ? 
"" : ", "); } pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); for (i = 0; i < entry->nb_args; i++) { pos += snprintf(buf + pos, LEN_OR_ZERO, ", ((unsigned long)(REC->%s))", entry->args[i]); } #undef LEN_OR_ZERO /* return the length of print_fmt */ return pos; } static int __init set_syscall_print_fmt(struct ftrace_event_call *call) { char *print_fmt; int len; struct syscall_metadata *entry = call->data; if (entry->enter_event != call) { call->print_fmt = "\"0x%lx\", REC->ret"; return 0; } /* First: called with 0 length to calculate the needed length */ len = __set_enter_print_fmt(entry, NULL, 0); print_fmt = kmalloc(len + 1, GFP_KERNEL); if (!print_fmt) return -ENOMEM; /* Second: actually write the @print_fmt */ __set_enter_print_fmt(entry, print_fmt, len + 1); call->print_fmt = print_fmt; return 0; } static void __init free_syscall_print_fmt(struct ftrace_event_call *call) { struct syscall_metadata *entry = call->data; if (entry->enter_event == call) kfree(call->print_fmt); } static int __init syscall_enter_define_fields(struct ftrace_event_call *call) { struct syscall_trace_enter trace; struct syscall_metadata *meta = call->data; int ret; int i; int offset = offsetof(typeof(trace), args); ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); if (ret) return ret; for (i = 0; i < meta->nb_args; i++) { ret = trace_define_field(call, meta->types[i], meta->args[i], offset, sizeof(unsigned long), 0, FILTER_OTHER); offset += sizeof(unsigned long); } return ret; } static int __init syscall_exit_define_fields(struct ftrace_event_call *call) { struct syscall_trace_exit trace; int ret; ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); if (ret) return ret; ret = trace_define_field(call, SYSCALL_FIELD(long, ret), FILTER_OTHER); return ret; } static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) { struct trace_array *tr = data; struct ftrace_event_file *ftrace_file; struct syscall_trace_enter *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; struct ring_buffer *buffer; unsigned long irq_flags; int pc; int syscall_nr; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); if (!ftrace_file) return; if (ftrace_trigger_soft_disabled(ftrace_file)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; local_save_flags(irq_flags); pc = preempt_count(); buffer = tr->trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); event_trigger_unlock_commit(ftrace_file, buffer, event, entry, irq_flags, pc); } static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) { struct trace_array *tr = data; struct ftrace_event_file *ftrace_file; struct syscall_trace_exit *entry; struct syscall_metadata *sys_data; struct ring_buffer_event *event; struct ring_buffer *buffer; unsigned long irq_flags; int pc; int syscall_nr; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); if 
(!ftrace_file) return; if (ftrace_trigger_soft_disabled(ftrace_file)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; local_save_flags(irq_flags); pc = preempt_count(); buffer = tr->trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, sys_data->exit_event->event.type, sizeof(*entry), irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->nr = syscall_nr; entry->ret = syscall_get_return_value(current, regs); event_trigger_unlock_commit(ftrace_file, buffer, event, entry, irq_flags, pc); } static int reg_event_syscall_enter(struct ftrace_event_file *file, struct ftrace_event_call *call) { struct trace_array *tr = file->tr; int ret = 0; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) return -ENOSYS; mutex_lock(&syscall_trace_lock); if (!tr->sys_refcount_enter) ret = register_trace_sys_enter(ftrace_syscall_enter, tr); if (!ret) { rcu_assign_pointer(tr->enter_syscall_files[num], file); tr->sys_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } static void unreg_event_syscall_enter(struct ftrace_event_file *file, struct ftrace_event_call *call) { struct trace_array *tr = file->tr; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) return; mutex_lock(&syscall_trace_lock); tr->sys_refcount_enter--; RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL); if (!tr->sys_refcount_enter) unregister_trace_sys_enter(ftrace_syscall_enter, tr); mutex_unlock(&syscall_trace_lock); } static int reg_event_syscall_exit(struct ftrace_event_file *file, struct ftrace_event_call *call) { struct trace_array *tr = file->tr; int ret = 0; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) return -ENOSYS; mutex_lock(&syscall_trace_lock); if (!tr->sys_refcount_exit) ret = register_trace_sys_exit(ftrace_syscall_exit, tr); if (!ret) { rcu_assign_pointer(tr->exit_syscall_files[num], file); tr->sys_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } static void unreg_event_syscall_exit(struct ftrace_event_file *file, struct ftrace_event_call *call) { struct trace_array *tr = file->tr; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) return; mutex_lock(&syscall_trace_lock); tr->sys_refcount_exit--; RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL); if (!tr->sys_refcount_exit) unregister_trace_sys_exit(ftrace_syscall_exit, tr); mutex_unlock(&syscall_trace_lock); } static int __init init_syscall_trace(struct ftrace_event_call *call) { int id; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; if (num < 0 || num >= NR_syscalls) { pr_debug("syscall %s metadata not mapped, disabling ftrace event\n", ((struct syscall_metadata *)call->data)->name); return -ENOSYS; } if (set_syscall_print_fmt(call) < 0) return -ENOMEM; id = trace_event_raw_init(call); if (id < 0) { free_syscall_print_fmt(call); return id; } return id; } struct trace_event_functions enter_syscall_print_funcs = { .trace = print_syscall_enter, }; struct trace_event_functions exit_syscall_print_funcs = { .trace = print_syscall_exit, }; struct ftrace_event_class __refdata event_class_syscall_enter = { .system = "syscalls", .reg = syscall_enter_register, .define_fields = syscall_enter_define_fields, .get_fields = syscall_get_enter_fields, .raw_init = init_syscall_trace, }; struct ftrace_event_class 
__refdata event_class_syscall_exit = { .system = "syscalls", .reg = syscall_exit_register, .define_fields = syscall_exit_define_fields, .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), .raw_init = init_syscall_trace, }; unsigned long __init __weak arch_syscall_addr(int nr) { return (unsigned long)sys_call_table[nr]; } static int __init init_ftrace_syscalls(void) { struct syscall_metadata *meta; unsigned long addr; int i; syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata), GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); return -ENOMEM; } for (i = 0; i < NR_syscalls; i++) { addr = arch_syscall_addr(i); meta = find_syscall_meta(addr); if (!meta) continue; meta->syscall_nr = i; syscalls_metadata[i] = meta; } return 0; } early_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); static int sys_perf_refcount_enter; static int sys_perf_refcount_exit; static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; struct hlist_head *head; int syscall_nr; int rctx; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; head = this_cpu_ptr(sys_data->enter_event->perf_events); if (hlist_empty(head)) return; /* get the size after alignment with the u32 buffer size field */ size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->event.type, regs, &rctx); if (!rec) return; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } static int perf_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); if (!sys_perf_refcount_enter) ret = register_trace_sys_enter(perf_syscall_enter, NULL); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); } else { set_bit(num, enabled_perf_enter_syscalls); sys_perf_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } static void perf_sysenter_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); sys_perf_refcount_enter--; clear_bit(num, enabled_perf_enter_syscalls); if (!sys_perf_refcount_enter) unregister_trace_sys_enter(perf_syscall_enter, NULL); mutex_unlock(&syscall_trace_lock); } static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; struct hlist_head *head; int syscall_nr; int rctx; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; head = this_cpu_ptr(sys_data->exit_event->perf_events); if (hlist_empty(head)) return; /* We can probably do that at build time */ size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); size -= sizeof(u32); rec = (struct syscall_trace_exit 
*)perf_trace_buf_prepare(size, sys_data->exit_event->event.type, regs, &rctx); if (!rec) return; rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } static int perf_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); if (!sys_perf_refcount_exit) ret = register_trace_sys_exit(perf_syscall_exit, NULL); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); } else { set_bit(num, enabled_perf_exit_syscalls); sys_perf_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } static void perf_sysexit_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); sys_perf_refcount_exit--; clear_bit(num, enabled_perf_exit_syscalls); if (!sys_perf_refcount_exit) unregister_trace_sys_exit(perf_syscall_exit, NULL); mutex_unlock(&syscall_trace_lock); } #endif /* CONFIG_PERF_EVENTS */ static int syscall_enter_register(struct ftrace_event_call *event, enum trace_reg type, void *data) { struct ftrace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: return reg_event_syscall_enter(file, event); case TRACE_REG_UNREGISTER: unreg_event_syscall_enter(file, event); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return perf_sysenter_enable(event); case TRACE_REG_PERF_UNREGISTER: perf_sysenter_disable(event); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif } return 0; } static int syscall_exit_register(struct ftrace_event_call *event, enum trace_reg type, void *data) { struct ftrace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: return reg_event_syscall_exit(file, event); case TRACE_REG_UNREGISTER: unreg_event_syscall_exit(file, event); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return perf_sysexit_enable(event); case TRACE_REG_PERF_UNREGISTER: perf_sysexit_disable(event); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif } return 0; }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_2288_0
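Note on the trace_syscalls.c record above: both perf paths size their records with size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); -- per the in-code comment, the perf ring buffer prepends a u32 size field, so the payload is padded such that header plus payload stays 8-byte aligned. A userspace sketch of that arithmetic, with ALIGN_UP standing in for the kernel's ALIGN() macro:

#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of the power-of-two a, like the kernel's
 * ALIGN() macro. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/*
 * Illustrative: payload size to reserve so that the 4-byte size field
 * prepended by the perf buffer keeps the whole record 8-byte aligned,
 * the same dance as perf_syscall_enter() and perf_syscall_exit().
 */
static uint32_t perf_payload_size(uint32_t raw)
{
    return (uint32_t) (ALIGN_UP(raw + sizeof(uint32_t), sizeof(uint64_t))
                       - sizeof(uint32_t));
}

int main(void)
{
    printf("%u\n", perf_payload_size(12)); /* 12: 12+4=16, already aligned */
    printf("%u\n", perf_payload_size(14)); /* 20: 14+4=18 -> 24, 24-4=20   */
    return 0;
}

Note too that syscall_nr_to_meta() rejects negative or out-of-range numbers before indexing syscalls_metadata, and every handler bails out early when trace_get_syscall_nr() reports an unusable (e.g. compat) syscall.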
crossvul-cpp_data_good_2131_1
/* * HID driver for Kye/Genius devices not fully compliant with HID standard * * Copyright (c) 2009 Jiri Kosina * Copyright (c) 2009 Tomas Hanak * Copyright (c) 2012 Nikolai Kondrashov */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* * See EasyPen i405X description, device and HID report descriptors at * http://sf.net/apps/mediawiki/digimend/?title=KYE_EasyPen_i405X */ /* Original EasyPen i405X report descriptor size */ #define EASYPEN_I405X_RDESC_ORIG_SIZE 476 /* Fixed EasyPen i405X report descriptor */ static __u8 easypen_i405x_rdesc_fixed[] = { 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x05, /* Report ID (5), */ 0x09, 0x01, /* Usage (01h), */ 0x15, 0x80, /* Logical Minimum (-128), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x10, /* Report ID (16), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x7C, 0x15, /* Physical Maximum (5500), */ 0x26, 0x00, 0x37, /* Logical Maximum (14080), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */ 0x26, 0x00, 0x28, /* Logical Maximum (10240), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; /* * See MousePen i608X description, device and HID report descriptors at * http://sf.net/apps/mediawiki/digimend/?title=KYE_MousePen_i608X */ /* Original MousePen i608X report descriptor size */ #define MOUSEPEN_I608X_RDESC_ORIG_SIZE 476 /* Fixed MousePen i608X report descriptor */ static __u8 mousepen_i608x_rdesc_fixed[] = { 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x05, /* Report ID (5), */ 0x09, 0x01, /* Usage (01h), */ 0x15, 0x80, /* Logical Minimum (-128), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* 
Collection (Application), */ 0x85, 0x10, /* Report ID (16), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0x00, 0x50, /* Logical Maximum (20480), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */ 0x26, 0x00, 0x3C, /* Logical Maximum (15360), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x11, /* Report ID (17), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x09, /* Usage Page (Button), */ 0x75, 0x01, /* Report Size (1), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0xB4, /* Pop, */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x75, 0x10, /* Report Size (16), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0x00, 0x50, /* Logical Maximum (20480), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */ 0x26, 0x00, 0x3C, /* Logical Maximum (15360), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; /* * See EasyPen M610X description, device and HID report descriptors at * http://sf.net/apps/mediawiki/digimend/?title=KYE_EasyPen_M610X */ /* Original EasyPen M610X report descriptor size */ #define EASYPEN_M610X_RDESC_ORIG_SIZE 476 /* Fixed EasyPen M610X report descriptor */ static __u8 easypen_m610x_rdesc_fixed[] = { 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x05, /* Report ID (5), */ 0x09, 0x01, /* Usage (01h), */ 0x15, 0x80, /* Logical Minimum (-128), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0x05, 
0x0D, /* Usage Page (Digitizer), */ 0x09, 0x02, /* Usage (Pen), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x10, /* Report ID (16), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ 0x27, 0x00, 0xA0, 0x00, 0x00, /* Logical Maximum (40960), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x6A, 0x18, /* Physical Maximum (6250), */ 0x26, 0x00, 0x64, /* Logical Maximum (25600), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x0C, /* Usage Page (Consumer), */ 0x09, 0x01, /* Usage (Consumer Control), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x12, /* Report ID (18), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x0A, 0x1A, 0x02, /* Usage (AC Undo), */ 0x0A, 0x79, 0x02, /* Usage (AC Redo Or Repeat), */ 0x0A, 0x2D, 0x02, /* Usage (AC Zoom In), */ 0x0A, 0x2E, 0x02, /* Usage (AC Zoom Out), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x14, /* Report Size (20), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x20, /* Report Size (32), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0xC0 /* End Collection */ }; static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize, int offset, const char *device_name) { /* * the fixup that needs to be done: * - change Usage Maximum in the Consumer Control * (report ID 3) to a reasonable value */ if (*rsize >= offset + 31 && /* Usage Page (Consumer Devices) */ rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c && /* Usage (Consumer Control) */ rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 && /* Usage Maximum > 12287 */ rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) { hid_info(hdev, "fixing up %s report descriptor\n", device_name); rdesc[offset + 12] = 0x2f; } return rdesc; } static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_KYE_ERGO_525V: /* the fixups that need to be done: * - change led usage page to button for extra buttons * - report size 8 count 1 must be size 1 count 8 for button * bitfield * - change the button usage range to 4-7 for the extra * buttons */ if (*rsize >= 75 && rdesc[61] == 0x05 && rdesc[62] == 0x08 && rdesc[63] == 0x19 && rdesc[64] == 0x08 && rdesc[65] == 0x29 && rdesc[66] == 0x0f && rdesc[71] == 0x75 && rdesc[72] == 0x08 && rdesc[73] == 0x95 && rdesc[74] == 0x01) { hid_info(hdev, "fixing up Kye/Genius Ergo Mouse " 
"report descriptor\n"); rdesc[62] = 0x09; rdesc[64] = 0x04; rdesc[66] = 0x07; rdesc[72] = 0x01; rdesc[74] = 0x08; } break; case USB_DEVICE_ID_KYE_EASYPEN_I405X: if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) { rdesc = easypen_i405x_rdesc_fixed; *rsize = sizeof(easypen_i405x_rdesc_fixed); } break; case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { rdesc = mousepen_i608x_rdesc_fixed; *rsize = sizeof(mousepen_i608x_rdesc_fixed); } break; case USB_DEVICE_ID_KYE_EASYPEN_M610X: if (*rsize == EASYPEN_M610X_RDESC_ORIG_SIZE) { rdesc = easypen_m610x_rdesc_fixed; *rsize = sizeof(easypen_m610x_rdesc_fixed); } break; case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE: rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, "Genius Gila Gaming Mouse"); break; case USB_DEVICE_ID_GENIUS_GX_IMPERATOR: rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, "Genius Gx Imperator Keyboard"); break; case USB_DEVICE_ID_GENIUS_MANTICORE: rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, "Genius Manticore Keyboard"); break; } return rdesc; } /** * Enable fully-functional tablet mode by setting a special feature report. * * @hdev: HID device * * The specific report ID and data were discovered by sniffing the * Windows driver traffic. */ static int kye_tablet_enable(struct hid_device *hdev) { struct list_head *list; struct list_head *head; struct hid_report *report; __s32 *value; list = &hdev->report_enum[HID_FEATURE_REPORT].report_list; list_for_each(head, list) { report = list_entry(head, struct hid_report, list); if (report->id == 5) break; } if (head == list) { hid_err(hdev, "tablet-enabling feature report not found\n"); return -ENODEV; } if (report->maxfield < 1 || report->field[0]->report_count < 7) { hid_err(hdev, "invalid tablet-enabling feature report\n"); return -ENODEV; } value = report->field[0]->value; value[0] = 0x12; value[1] = 0x10; value[2] = 0x11; value[3] = 0x12; value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); return 0; } static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } switch (id->product) { case USB_DEVICE_ID_KYE_EASYPEN_I405X: case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: case USB_DEVICE_ID_KYE_EASYPEN_M610X: ret = kye_tablet_enable(hdev); if (ret) { hid_err(hdev, "tablet enabling failed\n"); goto enabling_err; } break; case USB_DEVICE_ID_GENIUS_MANTICORE: /* * The manticore keyboard needs to have all the interfaces * opened at least once to be fully functional. 
*/ if (hid_hw_open(hdev)) hid_hw_close(hdev); break; } return 0; enabling_err: hid_hw_stop(hdev); err: return ret; } static const struct hid_device_id kye_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, { } }; MODULE_DEVICE_TABLE(hid, kye_devices); static struct hid_driver kye_driver = { .name = "kye", .id_table = kye_devices, .probe = kye_probe, .report_fixup = kye_report_fixup, }; module_hid_driver(kye_driver); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-119/c/good_2131_1
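The defensive shape of the driver above is worth isolating: kye_consumer_control_fixup() refuses to patch descriptor bytes unless *rsize proves the buffer extends past the patched offset, and kye_tablet_enable() checks report->maxfield and report->field[0]->report_count before writing seven feature values. A minimal stand-alone sketch of that check-before-patch idiom in plain C follows; the offsets, byte pattern, and function name are invented for illustration and are not the driver's real values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical fixup: clamp a "Usage Maximum" byte at offset + 12, but
 * only after proving the buffer reaches that far and the surrounding
 * bytes really are the item we expect, the same shape as
 * kye_consumer_control_fixup() above.
 */
static void patch_usage_maximum(uint8_t *desc, size_t size, size_t offset)
{
	if (size < offset + 13)
		return;		/* descriptor too short: do not touch it */
	if (desc[offset] == 0x05 && desc[offset + 1] == 0x0c &&
	    desc[offset + 10] == 0x2a && desc[offset + 12] > 0x2f)
		desc[offset + 12] = 0x2f;	/* clamp the maximum */
}

int main(void)
{
	uint8_t desc[13] = { 0x05, 0x0c, 0x09, 0x01, 0, 0, 0, 0,
			     0, 0, 0x2a, 0xff, 0xff };

	patch_usage_maximum(desc, sizeof(desc), 0);
	printf("patched byte: 0x%02x\n", desc[12]);	/* prints 0x2f */

	patch_usage_maximum(desc, 8, 0);	/* short buffer is left alone */
	return 0;
}

Both halves of the check matter because the device supplies the descriptor, so it controls both the length and the contents; any unguarded rdesc[offset + n] access is an out-of-bounds read or write, which is the CWE-119 class this dataset directory collects.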
crossvul-cpp_data_good_3638_0
/* * linux/fs/proc/root.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc root directory handling functions */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include "internal.h" static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; } static int proc_set_super(struct super_block *sb, void *data) { int err = set_anon_super(sb, NULL); if (!err) { struct pid_namespace *ns = (struct pid_namespace *)data; sb->s_fs_info = get_pid_ns(ns); } return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int err; struct super_block *sb; struct pid_namespace *ns; struct proc_inode *ei; if (flags & MS_KERNMOUNT) ns = (struct pid_namespace *)data; else ns = current->nsproxy->pid_ns; sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); return ERR_PTR(err); } sb->s_flags |= MS_ACTIVE; } ei = PROC_I(sb->s_root->d_inode); if (!ei->pid) { rcu_read_lock(); ei->pid = get_pid(find_pid_ns(1, ns)); rcu_read_unlock(); } return dget(sb->s_root); } static void proc_kill_sb(struct super_block *sb) { struct pid_namespace *ns; ns = (struct pid_namespace *)sb->s_fs_info; kill_anon_super(sb); put_pid_ns(ns); } static struct file_system_type proc_fs_type = { .name = "proc", .mount = proc_mount, .kill_sb = proc_kill_sb, }; void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat ) { generic_fillattr(dentry->d_inode, stat); stat->nlink = proc_root.nlink + nr_processes(); return 0; } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { if (!proc_lookup(dir, dentry, nd)) { return NULL; } return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned int nr = filp->f_pos; int ret; if (nr < FIRST_PROCESS_ENTRY) { int error = proc_readdir(filp, dirent, filldir); if (error <= 0) return error; filp->f_pos = FIRST_PROCESS_ENTRY; } ret = proc_pid_readdir(filp, dirent, filldir); return ret; } /* * The root /proc directory is special, as it has the * <pid> directories. Thus we don't use the generic * directory handling functions for that.. */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, .readdir = proc_root_readdir, .llseek = default_llseek, }; /* * proc root can do almost nothing.. 
*/ static const struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, .getattr = proc_root_getattr, }; /* * This is the root "inode" in the /proc tree.. */ struct proc_dir_entry proc_root = { .low_ino = PROC_ROOT_INO, .namelen = 5, .mode = S_IFDIR | S_IRUGO | S_IXUGO, .nlink = 2, .count = ATOMIC_INIT(1), .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, .name = "/proc", }; int pid_ns_prepare_proc(struct pid_namespace *ns) { struct vfsmount *mnt; mnt = kern_mount_data(&proc_fs_type, ns); if (IS_ERR(mnt)) return PTR_ERR(mnt); ns->proc_mnt = mnt; return 0; } void pid_ns_release_proc(struct pid_namespace *ns) { kern_unmount(ns->proc_mnt); }
./CrossVul/dataset_final_sorted/CWE-119/c/good_3638_0
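A pattern in the proc root code above worth a note is the strict get/put pairing around the pid namespace: proc_set_super() takes a reference with get_pid_ns() when a superblock is created, and proc_kill_sb() drops it with put_pid_ns() after kill_anon_super(), so the namespace cannot go away while any proc superblock still points at it. The user-space sketch below mirrors that pairing with a toy reference count; the struct and function names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a reference-counted pid namespace. */
struct toy_ns {
	int refcount;
};

static struct toy_ns *toy_ns_get(struct toy_ns *ns)
{
	ns->refcount++;			/* analogue of get_pid_ns() */
	return ns;
}

static void toy_ns_put(struct toy_ns *ns)
{
	/* Analogue of put_pid_ns(): free only when the last user is gone. */
	if (--ns->refcount == 0) {
		printf("namespace freed\n");
		free(ns);
	}
}

int main(void)
{
	struct toy_ns *ns = calloc(1, sizeof(*ns));

	if (ns == NULL)
		return 1;
	ns->refcount = 1;		/* the creator's reference */

	toy_ns_get(ns);			/* superblock created (proc_set_super) */
	toy_ns_put(ns);			/* superblock killed (proc_kill_sb) */
	toy_ns_put(ns);			/* creator drops its reference; freed */
	return 0;
}

Dropping either half of the pair gives the two classic failure modes, a leak when a put is missing and a use-after-free when a get is missing, which is why the mount and kill_sb paths above are symmetric.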
crossvul-cpp_data_good_3578_1
/* +----------------------------------------------------------------------+ | Suhosin Version 1 | +----------------------------------------------------------------------+ | Copyright (c) 2006-2007 The Hardened-PHP Project | | Copyright (c) 2007-2012 SektionEins GmbH | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Stefan Esser <sesser@sektioneins.de> | +----------------------------------------------------------------------+ */ /* $Id: header.c,v 1.1.1.1 2007-11-28 01:15:35 sesser Exp $ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/url.h" #include "php_suhosin.h" #include "SAPI.h" #include "php_variables.h" #if PHP_VERSION_ID >= 50300 static int (*orig_header_handler)(sapi_header_struct *sapi_header, sapi_header_op_enum op, sapi_headers_struct *sapi_headers TSRMLS_DC) = NULL; #else static int (*orig_header_handler)(sapi_header_struct *sapi_header, sapi_headers_struct *sapi_headers TSRMLS_DC) = NULL; #endif char *suhosin_encrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key TSRMLS_DC) { char *buf, *buf2, *d, *d_url; int l; buf = estrndup(name, name_len); name_len = php_url_decode(buf, name_len); normalize_varname(buf); name_len = strlen(buf); if (SUHOSIN_G(cookie_plainlist)) { if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) { encrypt_return_plain: efree(buf); return estrndup(value, value_len); } } else if (SUHOSIN_G(cookie_cryptlist)) { if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) { goto encrypt_return_plain; } } buf2 = estrndup(value, value_len); value_len = php_url_decode(buf2, value_len); d = suhosin_encrypt_string(buf2, value_len, buf, name_len, key TSRMLS_CC); d_url = php_url_encode(d, strlen(d), &l); efree(d); efree(buf); efree(buf2); return d_url; } char *suhosin_decrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key, char **where TSRMLS_DC) { int o_name_len = name_len; char *buf, *buf2, *d, *d_url; int l; buf = estrndup(name, name_len); name_len = php_url_decode(buf, name_len); normalize_varname(buf); name_len = strlen(buf); if (SUHOSIN_G(cookie_plainlist)) { if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) { decrypt_return_plain: efree(buf); memcpy(*where, name, o_name_len); *where += o_name_len; **where = '='; *where +=1; memcpy(*where, value, value_len); *where += value_len; return *where; } } else if (SUHOSIN_G(cookie_cryptlist)) { if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) { goto decrypt_return_plain; } } buf2 = estrndup(value, value_len); value_len = php_url_decode(buf2, value_len); d = suhosin_decrypt_string(buf2, value_len, buf, name_len, key, &l, SUHOSIN_G(cookie_checkraddr) TSRMLS_CC); if (d == NULL) { goto skip_cookie; } d_url = php_url_encode(d, l, &l); efree(d); memcpy(*where, name, o_name_len); *where += o_name_len; **where = '=';*where += 1; memcpy(*where, d_url, l); *where += l; efree(d_url); 
skip_cookie: efree(buf); efree(buf2); return *where; } /* {{{ suhosin_cookie_decryptor */ char *suhosin_cookie_decryptor(TSRMLS_D) { char *raw_cookie = SG(request_info).cookie_data; char *decrypted, *ret, *var, *val, *tmp; int j; char cryptkey[33]; /* if (...deactivated...) { return estrdup(raw_cookie); } */ suhosin_generate_key(SUHOSIN_G(cookie_cryptkey), SUHOSIN_G(cookie_cryptua), SUHOSIN_G(cookie_cryptdocroot), SUHOSIN_G(cookie_cryptraddr), (char *)&cryptkey TSRMLS_CC); ret = decrypted = emalloc(strlen(raw_cookie)*4+1); raw_cookie = estrdup(raw_cookie); SUHOSIN_G(raw_cookie) = estrdup(raw_cookie); j = 0; tmp = raw_cookie; while (*tmp) { char *d_url;int varlen; while (*tmp == '\t' || *tmp == ' ') tmp++; var = tmp; while (*tmp && *tmp != ';' && *tmp != '=') tmp++; varlen = tmp-var; /*memcpy(decrypted, var, varlen); decrypted += varlen;*/ if (*tmp == 0) break; if (*tmp++ == ';') { *decrypted++ = ';'; continue; } /**decrypted++ = '=';*/ val = tmp; while (*tmp && *tmp != ';') tmp++; d_url = suhosin_decrypt_single_cookie(var, varlen, val, tmp-val, (char *)&cryptkey, &decrypted TSRMLS_CC); if (*tmp == ';') { *decrypted++ = ';'; } if (*tmp == 0) break; tmp++; } *decrypted++ = 0; ret = erealloc(ret, decrypted-ret); SUHOSIN_G(decrypted_cookie) = ret; efree(raw_cookie); return ret; } /* }}} */ /* {{{ suhosin_header_handler */ #if PHP_VERSION_ID >= 50300 int suhosin_header_handler(sapi_header_struct *sapi_header, sapi_header_op_enum op, sapi_headers_struct *sapi_headers TSRMLS_DC) #else int suhosin_header_handler(sapi_header_struct *sapi_header, sapi_headers_struct *sapi_headers TSRMLS_DC) #endif { int retval = SAPI_HEADER_ADD, i; char *tmp; #if PHP_VERSION_ID >= 50300 if (op != SAPI_HEADER_ADD && op != SAPI_HEADER_REPLACE) { goto suhosin_skip_header_handling; } #endif if (sapi_header && sapi_header->header) { tmp = sapi_header->header; for (i=0; i<sapi_header->header_len; i++, tmp++) { if (tmp[0] == 0) { char *fname = get_active_function_name(TSRMLS_C); if (!fname) { fname = "unknown"; } suhosin_log(S_MISC, "%s() - wanted to send a HTTP header with an ASCII NUL in it", fname); if (!SUHOSIN_G(simulation)) { sapi_header->header_len = i; } } if (SUHOSIN_G(allow_multiheader)) { continue; } else if ((tmp[0] == '\r' && (tmp[1] != '\n' || i == 0)) || (tmp[0] == '\n' && (i == sapi_header->header_len-1 || i == 0 || (tmp[1] != ' ' && tmp[1] != '\t')))) { char *fname = get_active_function_name(TSRMLS_C); if (!fname) { fname = "unknown"; } suhosin_log(S_MISC, "%s() - wanted to send multiple HTTP headers at once", fname); if (!SUHOSIN_G(simulation)) { sapi_header->header_len = i; tmp[0] = 0; } } } } /* Handle a potential cookie */ if (SUHOSIN_G(cookie_encrypt) && (strncasecmp("Set-Cookie:", sapi_header->header, sizeof("Set-Cookie:")-1) == 0)) { char *start, *end, *rend, *tmp; char *name, *value; int nlen, vlen, len, tlen; char cryptkey[33]; suhosin_generate_key(SUHOSIN_G(cookie_cryptkey), SUHOSIN_G(cookie_cryptua), SUHOSIN_G(cookie_cryptdocroot), SUHOSIN_G(cookie_cryptraddr), (char *)&cryptkey TSRMLS_CC); start = estrndup(sapi_header->header, sapi_header->header_len); rend = end = start + sapi_header->header_len; tmp = memchr(start, ';', end-start); if (tmp != NULL) { end = tmp; } tmp = start + sizeof("Set-Cookie:") - 1; while (tmp < end && tmp[0]==' ') { tmp++; } name = tmp; nlen = end-name; tmp = memchr(name, '=', nlen); if (tmp == NULL) { value = end; } else { value = tmp+1; nlen = tmp-name; } vlen = end-value; value = suhosin_encrypt_single_cookie(name, nlen, value, vlen, (char *)&cryptkey TSRMLS_CC); vlen 
= strlen(value); len = sizeof("Set-Cookie: ")-1 + nlen + 1 + vlen + rend-end; tmp = emalloc(len + 1); tlen = sprintf(tmp, "Set-Cookie: %.*s=%s", nlen,name, value); memcpy(tmp + tlen, end, rend-end); tmp[len] = 0; efree(sapi_header->header); efree(value); efree(start); sapi_header->header = tmp; sapi_header->header_len = len; } suhosin_skip_header_handling: /* If existing call the sapi header handler */ if (orig_header_handler) { #if PHP_VERSION_ID >= 50300 retval = orig_header_handler(sapi_header, op, sapi_headers TSRMLS_CC); #else retval = orig_header_handler(sapi_header, sapi_headers TSRMLS_CC); #endif } return retval; } /* }}} */ /* {{{ suhosin_hook_header_handler */ void suhosin_hook_header_handler() { if (orig_header_handler == NULL) { orig_header_handler = sapi_module.header_handler; sapi_module.header_handler = suhosin_header_handler; } } /* }}} */ /* {{{ suhosin_unhook_header_handler */ void suhosin_unhook_header_handler() { sapi_module.header_handler = orig_header_handler; orig_header_handler = NULL; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
./CrossVul/dataset_final_sorted/CWE-119/c/good_3578_1
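One detail worth isolating from the Suhosin code above is the buffer sizing in suhosin_cookie_decryptor(): the output is assembled through a moving write pointer, so the allocation emalloc(strlen(raw_cookie)*4+1) must be a provable upper bound on everything the loop can emit (percent-encoding alone can turn one input byte into the three characters of "%XX"), and the final erealloc() trims the block back to the bytes actually used. Below is a stand-alone sketch of the same overallocate-then-shrink pattern, with the encoder and growth factor chosen for illustration rather than taken from Suhosin.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Percent-encode every byte; the worst case is 3 output bytes per input. */
static char *encode_all(const char *in)
{
	size_t len = strlen(in);
	char *out = malloc(len * 3 + 1);	/* provable upper bound */
	char *w = out;				/* moving write pointer */
	char *fit;
	size_t i;

	if (out == NULL)
		return NULL;
	for (i = 0; i < len; i++)
		w += sprintf(w, "%%%02X", (unsigned char)in[i]);
	*w = '\0';

	/* Shrink to the size actually written, like the erealloc() above. */
	fit = realloc(out, (size_t)(w - out) + 1);
	return fit != NULL ? fit : out;
}

int main(void)
{
	char *enc = encode_all("a=b");

	if (enc != NULL) {
		printf("%s\n", enc);	/* prints %61%3D%62 */
		free(enc);
	}
	return 0;
}

Undersizing that bound is exactly the overflow class the fixed file guards against: the write pointer in suhosin_decrypt_single_cookie() advances with no per-write check, so memory safety rests entirely on the initial allocation.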
crossvul-cpp_data_good_5565_0
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set or clear a single bit in the bitmap. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache; this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is greater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * the head of the cache bucket's list; if the cache bucket is out of room, * remove the last entry in the list first. It is important to note that there * is currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @doi_def: the DOI definition to free * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. 
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @len: the total tag length in bytes, not including this header * @buf: the CIPSO option buffer * * Description: * Write a CIPSO header into the beginning of @buffer. 
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure.
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negative values on failure.
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negative values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @skb: the packet * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized."
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. Further, * there is no legitimate reason for setting this from * userspace so reject it if skb is NULL. */ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) kfree_rcu(old, rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) kfree_rcu(opt, rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing I can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; kfree_rcu(opt, rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present.
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns zero on success and negative values on failure.
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
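/*
 * Editor's illustration (not part of the kernel file above): a standalone
 * userspace sketch of the fixed CIPSO option header that
 * cipso_v4_gentag_hdr() emits -- one octet of option type, one of total
 * length, then the DOI as a 32-bit big-endian value, followed by the tag.
 * The demo_* names are hypothetical and the two constants are restated
 * here only so the snippet compiles on its own; run it to inspect the
 * byte layout in isolation.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_IPOPT_CIPSO   134 /* CIPSO IP option type per the draft */
#define DEMO_CIPSO_HDR_LEN 6   /* type + length + 4-byte DOI */

static void demo_gentag_hdr(unsigned char *buf, uint32_t doi,
			    unsigned int tag_len)
{
	uint32_t doi_be = htonl(doi);

	buf[0] = DEMO_IPOPT_CIPSO;
	buf[1] = (unsigned char)(DEMO_CIPSO_HDR_LEN + tag_len);
	/* memcpy() instead of a pointer cast avoids an unaligned 32-bit store */
	memcpy(&buf[2], &doi_be, sizeof(doi_be));
}

int main(void)
{
	unsigned char opt[40] = { 0 };

	demo_gentag_hdr(opt, 3, 4); /* DOI 3, one minimal 4-byte tag */
	printf("type=%u len=%u doi=%02x%02x%02x%02x\n",
	       opt[0], opt[1], opt[2], opt[3], opt[4], opt[5]);
	return 0;
}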
/* * sc.c: General functions * * Copyright (C) 2001, 2002 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <ctype.h> #include <stdlib.h> #include <string.h> #include <assert.h> #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif #ifdef ENABLE_OPENSSL #include <openssl/crypto.h> /* for OPENSSL_cleanse */ #endif #include "internal.h" #ifdef PACKAGE_VERSION static const char *sc_version = PACKAGE_VERSION; #else static const char *sc_version = "(undef)"; #endif const char *sc_get_version(void) { return sc_version; } int sc_hex_to_bin(const char *in, u8 *out, size_t *outlen) { int err = SC_SUCCESS; size_t left, count = 0, in_len; if (in == NULL || out == NULL || outlen == NULL) { return SC_ERROR_INVALID_ARGUMENTS; } left = *outlen; in_len = strlen(in); while (*in != '\0') { int byte = 0, nybbles = 2; while (nybbles-- && *in && *in != ':' && *in != ' ') { char c; byte <<= 4; c = *in++; if ('0' <= c && c <= '9') c -= '0'; else if ('a' <= c && c <= 'f') c = c - 'a' + 10; else if ('A' <= c && c <= 'F') c = c - 'A' + 10; else { err = SC_ERROR_INVALID_ARGUMENTS; goto out; } byte |= c; } /* Detect premature end of string before byte is complete */ if (in_len > 1 && *in == '\0' && nybbles >= 0) { err = SC_ERROR_INVALID_ARGUMENTS; break; } if (*in == ':' || *in == ' ') in++; if (left <= 0) { err = SC_ERROR_BUFFER_TOO_SMALL; break; } out[count++] = (u8) byte; left--; } out: *outlen = count; return err; } int sc_bin_to_hex(const u8 *in, size_t in_len, char *out, size_t out_len, int in_sep) { unsigned int n, sep_len; char *pos, *end, sep; sep = (char)in_sep; sep_len = sep > 0 ? 
1 : 0; pos = out; end = out + out_len; for (n = 0; n < in_len; n++) { if (pos + 3 + sep_len >= end) return SC_ERROR_BUFFER_TOO_SMALL; if (n && sep_len) *pos++ = sep; sprintf(pos, "%02x", in[n]); pos += 2; } *pos = '\0'; return SC_SUCCESS; } /* * Right trim all non-printable characters */ size_t sc_right_trim(u8 *buf, size_t len) { size_t i; if (!buf) return 0; if (len > 0) { for(i = len-1; i > 0; i--) { if(!isprint(buf[i])) { buf[i] = '\0'; len--; continue; } break; } } return len; } u8 *ulong2bebytes(u8 *buf, unsigned long x) { if (buf != NULL) { buf[3] = (u8) (x & 0xff); buf[2] = (u8) ((x >> 8) & 0xff); buf[1] = (u8) ((x >> 16) & 0xff); buf[0] = (u8) ((x >> 24) & 0xff); } return buf; } u8 *ushort2bebytes(u8 *buf, unsigned short x) { if (buf != NULL) { buf[1] = (u8) (x & 0xff); buf[0] = (u8) ((x >> 8) & 0xff); } return buf; } unsigned long bebytes2ulong(const u8 *buf) { if (buf == NULL) return 0UL; return (unsigned long) (buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]); } unsigned short bebytes2ushort(const u8 *buf) { if (buf == NULL) return 0U; return (unsigned short) (buf[0] << 8 | buf[1]); } unsigned short lebytes2ushort(const u8 *buf) { if (buf == NULL) return 0U; return (unsigned short)buf[1] << 8 | (unsigned short)buf[0]; } void sc_init_oid(struct sc_object_id *oid) { int ii; if (!oid) return; for (ii=0; ii<SC_MAX_OBJECT_ID_OCTETS; ii++) oid->value[ii] = -1; } int sc_format_oid(struct sc_object_id *oid, const char *in) { int ii, ret = SC_ERROR_INVALID_ARGUMENTS; const char *p; char *q; if (oid == NULL || in == NULL) return SC_ERROR_INVALID_ARGUMENTS; sc_init_oid(oid); p = in; for (ii=0; ii < SC_MAX_OBJECT_ID_OCTETS; ii++) { oid->value[ii] = strtol(p, &q, 10); if (!*q) break; if (!(q[0] == '.' && isdigit(q[1]))) goto out; p = q + 1; } if (!sc_valid_oid(oid)) goto out; ret = SC_SUCCESS; out: if (ret) sc_init_oid(oid); return ret; } int sc_compare_oid(const struct sc_object_id *oid1, const struct sc_object_id *oid2) { int i; if (oid1 == NULL || oid2 == NULL) { return SC_ERROR_INVALID_ARGUMENTS; } for (i = 0; i < SC_MAX_OBJECT_ID_OCTETS; i++) { if (oid1->value[i] != oid2->value[i]) return 0; if (oid1->value[i] == -1) break; } return 1; } int sc_valid_oid(const struct sc_object_id *oid) { int ii; if (!oid) return 0; if (oid->value[0] == -1 || oid->value[1] == -1) return 0; if (oid->value[0] > 2 || oid->value[1] > 39) return 0; for (ii=0;ii<SC_MAX_OBJECT_ID_OCTETS;ii++) if (oid->value[ii]) break; if (ii==SC_MAX_OBJECT_ID_OCTETS) return 0; return 1; } int sc_detect_card_presence(sc_reader_t *reader) { int r; LOG_FUNC_CALLED(reader->ctx); if (reader->ops->detect_card_presence == NULL) LOG_FUNC_RETURN(reader->ctx, SC_ERROR_NOT_SUPPORTED); r = reader->ops->detect_card_presence(reader); LOG_FUNC_RETURN(reader->ctx, r); } int sc_path_set(sc_path_t *path, int type, const u8 *id, size_t id_len, int idx, int count) { if (path == NULL || id == NULL || id_len == 0 || id_len > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memset(path, 0, sizeof(*path)); memcpy(path->value, id, id_len); path->len = id_len; path->type = type; path->index = idx; path->count = count; return SC_SUCCESS; } void sc_format_path(const char *str, sc_path_t *path) { int type = SC_PATH_TYPE_PATH; if (path) { memset(path, 0, sizeof(*path)); if (*str == 'i' || *str == 'I') { type = SC_PATH_TYPE_FILE_ID; str++; } path->len = sizeof(path->value); if (sc_hex_to_bin(str, path->value, &path->len) >= 0) { path->type = type; } path->count = -1; } } int sc_append_path(sc_path_t *dest, const sc_path_t *src) { return 
sc_concatenate_path(dest, dest, src); } int sc_append_path_id(sc_path_t *dest, const u8 *id, size_t idlen) { if (dest->len + idlen > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memcpy(dest->value + dest->len, id, idlen); dest->len += idlen; return SC_SUCCESS; } int sc_append_file_id(sc_path_t *dest, unsigned int fid) { u8 id[2] = { fid >> 8, fid & 0xff }; return sc_append_path_id(dest, id, 2); } int sc_concatenate_path(sc_path_t *d, const sc_path_t *p1, const sc_path_t *p2) { sc_path_t tpath; if (d == NULL || p1 == NULL || p2 == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (p1->type == SC_PATH_TYPE_DF_NAME || p2->type == SC_PATH_TYPE_DF_NAME) /* we do not support concatenation of AIDs at the moment */ return SC_ERROR_NOT_SUPPORTED; if (p1->len + p2->len > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memset(&tpath, 0, sizeof(sc_path_t)); memcpy(tpath.value, p1->value, p1->len); memcpy(tpath.value + p1->len, p2->value, p2->len); tpath.len = p1->len + p2->len; tpath.type = SC_PATH_TYPE_PATH; /* use 'index' and 'count' entry of the second path object */ tpath.index = p2->index; tpath.count = p2->count; /* the result is currently always as path */ tpath.type = SC_PATH_TYPE_PATH; *d = tpath; return SC_SUCCESS; } const char *sc_print_path(const sc_path_t *path) { static char buffer[SC_MAX_PATH_STRING_SIZE + SC_MAX_AID_STRING_SIZE]; if (sc_path_print(buffer, sizeof(buffer), path) != SC_SUCCESS) buffer[0] = '\0'; return buffer; } int sc_path_print(char *buf, size_t buflen, const sc_path_t *path) { size_t i; if (buf == NULL || path == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (buflen < path->len * 2 + path->aid.len * 2 + 1) return SC_ERROR_BUFFER_TOO_SMALL; buf[0] = '\0'; if (path->aid.len) { for (i = 0; i < path->aid.len; i++) snprintf(buf + strlen(buf), buflen - strlen(buf), "%02x", path->aid.value[i]); snprintf(buf + strlen(buf), buflen - strlen(buf), "::"); } for (i = 0; i < path->len; i++) snprintf(buf + strlen(buf), buflen - strlen(buf), "%02x", path->value[i]); if (!path->aid.len && path->type == SC_PATH_TYPE_DF_NAME) snprintf(buf + strlen(buf), buflen - strlen(buf), "::"); return SC_SUCCESS; } int sc_compare_path(const sc_path_t *path1, const sc_path_t *path2) { return path1->len == path2->len && !memcmp(path1->value, path2->value, path1->len); } int sc_compare_path_prefix(const sc_path_t *prefix, const sc_path_t *path) { sc_path_t tpath; if (prefix->len > path->len) return 0; tpath = *path; tpath.len = prefix->len; return sc_compare_path(&tpath, prefix); } const sc_path_t *sc_get_mf_path(void) { static const sc_path_t mf_path = { {0x3f, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 0, 0, SC_PATH_TYPE_PATH, {{0},0} }; return &mf_path; } int sc_file_add_acl_entry(sc_file_t *file, unsigned int operation, unsigned int method, unsigned long key_ref) { sc_acl_entry_t *p, *_new; if (file == NULL || operation >= SC_MAX_AC_OPS) { return SC_ERROR_INVALID_ARGUMENTS; } switch (method) { case SC_AC_NEVER: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 1; return SC_SUCCESS; case SC_AC_NONE: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 2; return SC_SUCCESS; case SC_AC_UNKNOWN: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 3; return SC_SUCCESS; default: /* NONE and UNKNOWN get zapped when a new AC is added. * If the ACL is NEVER, additional entries will be * dropped silently. 
*/ if (file->acl[operation] == (sc_acl_entry_t *) 1) return SC_SUCCESS; if (file->acl[operation] == (sc_acl_entry_t *) 2 || file->acl[operation] == (sc_acl_entry_t *) 3) file->acl[operation] = NULL; } /* If the entry is already present (e.g. due to the mapping) * of the card's AC with OpenSC's), don't add it again. */ for (p = file->acl[operation]; p != NULL; p = p->next) { if ((p->method == method) && (p->key_ref == key_ref)) return SC_SUCCESS; } _new = malloc(sizeof(sc_acl_entry_t)); if (_new == NULL) return SC_ERROR_OUT_OF_MEMORY; _new->method = method; _new->key_ref = key_ref; _new->next = NULL; p = file->acl[operation]; if (p == NULL) { file->acl[operation] = _new; return SC_SUCCESS; } while (p->next != NULL) p = p->next; p->next = _new; return SC_SUCCESS; } const sc_acl_entry_t * sc_file_get_acl_entry(const sc_file_t *file, unsigned int operation) { sc_acl_entry_t *p; static const sc_acl_entry_t e_never = { SC_AC_NEVER, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; static const sc_acl_entry_t e_none = { SC_AC_NONE, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; static const sc_acl_entry_t e_unknown = { SC_AC_UNKNOWN, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; if (file == NULL || operation >= SC_MAX_AC_OPS) { return NULL; } p = file->acl[operation]; if (p == (sc_acl_entry_t *) 1) return &e_never; if (p == (sc_acl_entry_t *) 2) return &e_none; if (p == (sc_acl_entry_t *) 3) return &e_unknown; return file->acl[operation]; } void sc_file_clear_acl_entries(sc_file_t *file, unsigned int operation) { sc_acl_entry_t *e; if (file == NULL || operation >= SC_MAX_AC_OPS) { return; } e = file->acl[operation]; if (e == (sc_acl_entry_t *) 1 || e == (sc_acl_entry_t *) 2 || e == (sc_acl_entry_t *) 3) { file->acl[operation] = NULL; return; } while (e != NULL) { sc_acl_entry_t *tmp = e->next; free(e); e = tmp; } file->acl[operation] = NULL; } sc_file_t * sc_file_new(void) { sc_file_t *file = (sc_file_t *)calloc(1, sizeof(sc_file_t)); if (file == NULL) return NULL; file->magic = SC_FILE_MAGIC; return file; } void sc_file_free(sc_file_t *file) { unsigned int i; if (file == NULL || !sc_file_valid(file)) return; file->magic = 0; for (i = 0; i < SC_MAX_AC_OPS; i++) sc_file_clear_acl_entries(file, i); if (file->sec_attr) free(file->sec_attr); if (file->prop_attr) free(file->prop_attr); if (file->type_attr) free(file->type_attr); if (file->encoded_content) free(file->encoded_content); free(file); } void sc_file_dup(sc_file_t **dest, const sc_file_t *src) { sc_file_t *newf; const sc_acl_entry_t *e; unsigned int op; *dest = NULL; if (!sc_file_valid(src)) return; newf = sc_file_new(); if (newf == NULL) return; *dest = newf; memcpy(&newf->path, &src->path, sizeof(struct sc_path)); memcpy(&newf->name, &src->name, sizeof(src->name)); newf->namelen = src->namelen; newf->type = src->type; newf->shareable = src->shareable; newf->ef_structure = src->ef_structure; newf->size = src->size; newf->id = src->id; newf->status = src->status; for (op = 0; op < SC_MAX_AC_OPS; op++) { newf->acl[op] = NULL; e = sc_file_get_acl_entry(src, op); if (e != NULL) { if (sc_file_add_acl_entry(newf, op, e->method, e->key_ref) < 0) goto err; } } newf->record_length = src->record_length; newf->record_count = src->record_count; if (sc_file_set_sec_attr(newf, src->sec_attr, src->sec_attr_len) < 0) goto err; if (sc_file_set_prop_attr(newf, src->prop_attr, src->prop_attr_len) < 0) goto err; if (sc_file_set_type_attr(newf, src->type_attr, src->type_attr_len) < 0) goto err; if (sc_file_set_content(newf, src->encoded_content, 
src->encoded_content_len) < 0) goto err; return; err: sc_file_free(newf); *dest = NULL; } int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr, size_t sec_attr_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (sec_attr == NULL || sec_attr_len == 0) { if (file->sec_attr != NULL) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return 0; } tmp = (u8 *) realloc(file->sec_attr, sec_attr_len); if (!tmp) { if (file->sec_attr) free(file->sec_attr); file->sec_attr = NULL; file->sec_attr_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->sec_attr = tmp; memcpy(file->sec_attr, sec_attr, sec_attr_len); file->sec_attr_len = sec_attr_len; return 0; } int sc_file_set_prop_attr(sc_file_t *file, const u8 *prop_attr, size_t prop_attr_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (prop_attr == NULL) { if (file->prop_attr != NULL) free(file->prop_attr); file->prop_attr = NULL; file->prop_attr_len = 0; return SC_SUCCESS; } tmp = (u8 *) realloc(file->prop_attr, prop_attr_len); if (!tmp) { if (file->prop_attr) free(file->prop_attr); file->prop_attr = NULL; file->prop_attr_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->prop_attr = tmp; memcpy(file->prop_attr, prop_attr, prop_attr_len); file->prop_attr_len = prop_attr_len; return SC_SUCCESS; } int sc_file_set_type_attr(sc_file_t *file, const u8 *type_attr, size_t type_attr_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (type_attr == NULL) { if (file->type_attr != NULL) free(file->type_attr); file->type_attr = NULL; file->type_attr_len = 0; return SC_SUCCESS; } tmp = (u8 *) realloc(file->type_attr, type_attr_len); if (!tmp) { if (file->type_attr) free(file->type_attr); file->type_attr = NULL; file->type_attr_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->type_attr = tmp; memcpy(file->type_attr, type_attr, type_attr_len); file->type_attr_len = type_attr_len; return SC_SUCCESS; } int sc_file_set_content(sc_file_t *file, const u8 *content, size_t content_len) { u8 *tmp; if (!sc_file_valid(file)) { return SC_ERROR_INVALID_ARGUMENTS; } if (content == NULL) { if (file->encoded_content != NULL) free(file->encoded_content); file->encoded_content = NULL; file->encoded_content_len = 0; return SC_SUCCESS; } tmp = (u8 *) realloc(file->encoded_content, content_len); if (!tmp) { if (file->encoded_content) free(file->encoded_content); file->encoded_content = NULL; file->encoded_content_len = 0; return SC_ERROR_OUT_OF_MEMORY; } file->encoded_content = tmp; memcpy(file->encoded_content, content, content_len); file->encoded_content_len = content_len; return SC_SUCCESS; } int sc_file_valid(const sc_file_t *file) { if (file == NULL) return 0; return file->magic == SC_FILE_MAGIC; } int _sc_parse_atr(sc_reader_t *reader) { u8 *p = reader->atr.value; int atr_len = (int) reader->atr.len; int n_hist, x; int tx[4] = {-1, -1, -1, -1}; int i, FI, DI; const int Fi_table[] = { 372, 372, 558, 744, 1116, 1488, 1860, -1, -1, 512, 768, 1024, 1536, 2048, -1, -1 }; const int f_table[] = { 40, 50, 60, 80, 120, 160, 200, -1, -1, 50, 75, 100, 150, 200, -1, -1 }; const int Di_table[] = { -1, 1, 2, 4, 8, 16, 32, -1, 12, 20, -1, -1, -1, -1, -1, -1 }; reader->atr_info.hist_bytes_len = 0; reader->atr_info.hist_bytes = NULL; if (atr_len == 0) { sc_log(reader->ctx, "empty ATR - card not present?\n"); return SC_ERROR_INTERNAL; } if (p[0] != 0x3B && p[0] != 0x3F) { sc_log(reader->ctx, "invalid sync byte in ATR: 0x%02X\n", p[0]); return SC_ERROR_INTERNAL; } n_hist = p[1] & 0x0F; x =
p[1] >> 4; p += 2; atr_len -= 2; for (i = 0; i < 4 && atr_len > 0; i++) { if (x & (1 << i)) { tx[i] = *p; p++; atr_len--; } else tx[i] = -1; } if (tx[0] >= 0) { reader->atr_info.FI = FI = tx[0] >> 4; reader->atr_info.DI = DI = tx[0] & 0x0F; reader->atr_info.Fi = Fi_table[FI]; reader->atr_info.f = f_table[FI]; reader->atr_info.Di = Di_table[DI]; } else { reader->atr_info.Fi = -1; reader->atr_info.f = -1; reader->atr_info.Di = -1; } if (tx[2] >= 0) reader->atr_info.N = tx[2]; else reader->atr_info.N = -1; while (tx[3] > 0 && tx[3] & 0xF0 && atr_len > 0) { x = tx[3] >> 4; for (i = 0; i < 4 && atr_len > 0; i++) { if (x & (1 << i)) { tx[i] = *p; p++; atr_len--; } else tx[i] = -1; } } if (atr_len <= 0) return SC_SUCCESS; if (n_hist > atr_len) n_hist = atr_len; reader->atr_info.hist_bytes_len = n_hist; reader->atr_info.hist_bytes = p; return SC_SUCCESS; } void sc_mem_clear(void *ptr, size_t len) { if (len > 0) { #ifdef ENABLE_OPENSSL OPENSSL_cleanse(ptr, len); #else memset(ptr, 0, len); #endif } } int sc_mem_reverse(unsigned char *buf, size_t len) { unsigned char ch; size_t ii; if (!buf || !len) return SC_ERROR_INVALID_ARGUMENTS; for (ii = 0; ii < len / 2; ii++) { ch = *(buf + ii); *(buf + ii) = *(buf + len - 1 - ii); *(buf + len - 1 - ii) = ch; } return SC_SUCCESS; } static int sc_remote_apdu_allocate(struct sc_remote_data *rdata, struct sc_remote_apdu **new_rapdu) { struct sc_remote_apdu *rapdu = NULL, *rr; if (!rdata) return SC_ERROR_INVALID_ARGUMENTS; rapdu = calloc(1, sizeof(struct sc_remote_apdu)); if (rapdu == NULL) return SC_ERROR_OUT_OF_MEMORY; rapdu->apdu.data = &rapdu->sbuf[0]; rapdu->apdu.resp = &rapdu->rbuf[0]; rapdu->apdu.resplen = sizeof(rapdu->rbuf); if (new_rapdu) *new_rapdu = rapdu; if (rdata->data == NULL) { rdata->data = rapdu; rdata->length = 1; return SC_SUCCESS; } for (rr = rdata->data; rr->next; rr = rr->next) ; rr->next = rapdu; rdata->length++; return SC_SUCCESS; } static void sc_remote_apdu_free (struct sc_remote_data *rdata) { struct sc_remote_apdu *rapdu = NULL; if (!rdata) return; rapdu = rdata->data; while(rapdu) { struct sc_remote_apdu *rr = rapdu->next; free(rapdu); rapdu = rr; } } void sc_remote_data_init(struct sc_remote_data *rdata) { if (!rdata) return; memset(rdata, 0, sizeof(struct sc_remote_data)); rdata->alloc = sc_remote_apdu_allocate; rdata->free = sc_remote_apdu_free; } static unsigned long sc_CRC_tab32[256]; static int sc_CRC_tab32_initialized = 0; unsigned sc_crc32(const unsigned char *value, size_t len) { size_t ii, jj; unsigned long crc; unsigned long index, long_c; if (!sc_CRC_tab32_initialized) { for (ii=0; ii<256; ii++) { crc = (unsigned long) ii; for (jj=0; jj<8; jj++) { if ( crc & 0x00000001L ) crc = ( crc >> 1 ) ^ 0xEDB88320l; else crc = crc >> 1; } sc_CRC_tab32[ii] = crc; } sc_CRC_tab32_initialized = 1; } crc = 0xffffffffL; for (ii=0; ii<len; ii++) { long_c = 0x000000ffL & (unsigned long) (*(value + ii)); index = crc ^ long_c; crc = (crc >> 8) ^ sc_CRC_tab32[ index & 0xff ]; } crc ^= 0xffffffff; return crc%0xffff; } const u8 *sc_compacttlv_find_tag(const u8 *buf, size_t len, u8 tag, size_t *outlen) { if (buf != NULL) { size_t idx; u8 plain_tag = tag & 0xF0; size_t expected_len = tag & 0x0F; for (idx = 0; idx < len; idx++) { if ((buf[idx] & 0xF0) == plain_tag && idx + expected_len < len && (expected_len == 0 || expected_len == (buf[idx] & 0x0F))) { if (outlen != NULL) *outlen = buf[idx] & 0x0F; return buf + (idx + 1); } idx += (buf[idx] & 0x0F); } } return NULL; } /**************************** mutex functions ************************/ int
sc_mutex_create(const sc_context_t *ctx, void **mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->create_mutex != NULL) return ctx->thread_ctx->create_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_lock(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->lock_mutex != NULL) return ctx->thread_ctx->lock_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_unlock(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->unlock_mutex != NULL) return ctx->thread_ctx->unlock_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_destroy(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->destroy_mutex != NULL) return ctx->thread_ctx->destroy_mutex(mutex); else return SC_SUCCESS; } unsigned long sc_thread_id(const sc_context_t *ctx) { if (ctx == NULL || ctx->thread_ctx == NULL || ctx->thread_ctx->thread_id == NULL) return 0UL; else return ctx->thread_ctx->thread_id(); }
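/*
 * Editor's illustration (not part of the libopensc file above): a
 * standalone round trip through the big-endian packing contract of
 * ulong2bebytes()/bebytes2ulong().  The demo_* reimplementations exist
 * only so the snippet compiles on its own; note that the explicit casts
 * in the decoder avoid promoting buf[0] to a signed int before the
 * 24-bit shift, a subtlety the original helpers rely on the platform to
 * tolerate.
 */
#include <stdio.h>

typedef unsigned char u8;

static u8 *demo_ulong2bebytes(u8 *buf, unsigned long x)
{
	if (buf != NULL) {
		buf[0] = (u8)((x >> 24) & 0xff);
		buf[1] = (u8)((x >> 16) & 0xff);
		buf[2] = (u8)((x >> 8) & 0xff);
		buf[3] = (u8)(x & 0xff);
	}
	return buf;
}

static unsigned long demo_bebytes2ulong(const u8 *buf)
{
	if (buf == NULL)
		return 0UL;
	return ((unsigned long)buf[0] << 24) | ((unsigned long)buf[1] << 16) |
	       ((unsigned long)buf[2] << 8) | (unsigned long)buf[3];
}

int main(void)
{
	u8 buf[4];

	demo_ulong2bebytes(buf, 0x3f00UL); /* the MF file id used above */
	printf("round trip: 0x%lx\n", demo_bebytes2ulong(buf));
	return 0;
}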
/* -*- C -*- */ /* * block_template.c : Generic framework for block encryption algorithms * * Written by Andrew Kuchling and others * * =================================================================== * The contents of this file are dedicated to the public domain. To * the extent that dedication to the public domain is not available, * everyone is granted a worldwide, perpetual, royalty-free, * non-exclusive license to exercise all rights associated with the * contents of this file for any purpose whatsoever. * No rights are reserved. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * =================================================================== */ #include "pycrypto_common.h" #include "modsupport.h" #include <string.h> #include "_counter.h" /* Cipher operation modes */ #define MODE_ECB 1 #define MODE_CBC 2 #define MODE_CFB 3 #define MODE_PGP 4 #define MODE_OFB 5 #define MODE_CTR 6 #define _STR(x) #x #define _XSTR(x) _STR(x) #define _PASTE(x,y) x##y #define _PASTE2(x,y) _PASTE(x,y) #ifdef IS_PY3K #define _MODULE_NAME _PASTE2(PyInit_,MODULE_NAME) #else #define _MODULE_NAME _PASTE2(init,MODULE_NAME) #endif #define _MODULE_STRING _XSTR(MODULE_NAME) /* Object references for the counter_shortcut */ static PyObject *_counter_module = NULL; static PyTypeObject *PCT_CounterBEType = NULL; static PyTypeObject *PCT_CounterLEType = NULL; typedef struct { PyObject_HEAD int mode, count, segment_size; unsigned char IV[BLOCK_SIZE], oldCipher[BLOCK_SIZE]; PyObject *counter; int counter_shortcut; block_state st; } ALGobject; /* Please see PEP3123 for a discussion of PyObject_HEAD and changes made in 3.x to make it conform to Standard C. 
* These changes also dictate using Py_TYPE to check type, and PyVarObject_HEAD_INIT(NULL, 0) to initialize */ staticforward PyTypeObject ALGtype; static ALGobject * newALGobject(void) { ALGobject * new; new = PyObject_New(ALGobject, &ALGtype); new->mode = MODE_ECB; new->counter = NULL; new->counter_shortcut = 0; return new; } static void ALGdealloc(PyObject *ptr) { ALGobject *self = (ALGobject *)ptr; /* Overwrite the contents of the object */ Py_XDECREF(self->counter); self->counter = NULL; memset(self->IV, 0, BLOCK_SIZE); memset(self->oldCipher, 0, BLOCK_SIZE); memset((char*)&(self->st), 0, sizeof(block_state)); self->mode = self->count = self->segment_size = 0; PyObject_Del(ptr); } static char ALGnew__doc__[] = "new(key, [mode], [IV]): Return a new " _MODULE_STRING " encryption object."; static char *kwlist[] = {"key", "mode", "IV", "counter", "segment_size", #ifdef PCT_ARC2_MODULE "effective_keylen", #endif NULL}; static ALGobject * ALGnew(PyObject *self, PyObject *args, PyObject *kwdict) { unsigned char *key, *IV; ALGobject * new=NULL; int keylen, IVlen=0, mode=MODE_ECB, segment_size=0; PyObject *counter = NULL; int counter_shortcut = 0; #ifdef PCT_ARC2_MODULE int effective_keylen = 1024; /* this is a weird default, but it's compatible with old versions of PyCrypto */ #endif /* Set default values */ if (!PyArg_ParseTupleAndKeywords(args, kwdict, "s#|is#Oi" #ifdef PCT_ARC2_MODULE "i" #endif , kwlist, &key, &keylen, &mode, &IV, &IVlen, &counter, &segment_size #ifdef PCT_ARC2_MODULE , &effective_keylen #endif )) { return NULL; } if (mode<MODE_ECB || mode>MODE_CTR) { PyErr_Format(PyExc_ValueError, "Unknown cipher feedback mode %i", mode); return NULL; } if (mode == MODE_PGP) { PyErr_Format(PyExc_ValueError, "MODE_PGP is not supported anymore"); return NULL; } if (KEY_SIZE!=0 && keylen!=KEY_SIZE) { PyErr_Format(PyExc_ValueError, "Key must be %i bytes long, not %i", KEY_SIZE, keylen); return NULL; } if (KEY_SIZE==0 && keylen==0) { PyErr_SetString(PyExc_ValueError, "Key cannot be the null string"); return NULL; } if (IVlen != 0 && mode == MODE_ECB) { PyErr_Format(PyExc_ValueError, "ECB mode does not use IV"); return NULL; } if (IVlen != 0 && mode == MODE_CTR) { PyErr_Format(PyExc_ValueError, "CTR mode needs counter parameter, not IV"); return NULL; } if (IVlen != BLOCK_SIZE && mode != MODE_ECB && mode != MODE_CTR) { PyErr_Format(PyExc_ValueError, "IV must be %i bytes long", BLOCK_SIZE); return NULL; } /* Mode-specific checks */ if (mode == MODE_CFB) { if (segment_size == 0) segment_size = 8; if (segment_size < 1 || segment_size > BLOCK_SIZE*8 || ((segment_size & 7) != 0)) { PyErr_Format(PyExc_ValueError, "segment_size must be multiple of 8 (bits) " "between 1 and %i", BLOCK_SIZE*8); return NULL; } } if (mode == MODE_CTR) { if (counter == NULL) { PyErr_SetString(PyExc_TypeError, "'counter' keyword parameter is required with CTR mode"); return NULL; } else if (Py_TYPE(counter) == PCT_CounterBEType || Py_TYPE(counter) == PCT_CounterLEType) { counter_shortcut = 1; } else if (!PyCallable_Check(counter)) { PyErr_SetString(PyExc_ValueError, "'counter' parameter must be a callable object"); return NULL; } } else { if (counter != NULL) { PyErr_SetString(PyExc_ValueError, "'counter' parameter only useful with CTR mode"); return NULL; } } /* Cipher-specific checks */ #ifdef PCT_ARC2_MODULE if (effective_keylen<0 || effective_keylen>1024) { PyErr_Format(PyExc_ValueError, "RC2: effective_keylen must be between 0 and 1024, not %i", effective_keylen); return NULL; } #endif /* Copy parameters into object */ 
new = newALGobject(); new->segment_size = segment_size; new->counter = counter; Py_XINCREF(counter); new->counter_shortcut = counter_shortcut; #ifdef PCT_ARC2_MODULE new->st.effective_keylen = effective_keylen; #endif block_init(&(new->st), key, keylen); if (PyErr_Occurred()) { Py_DECREF(new); return NULL; } memset(new->IV, 0, BLOCK_SIZE); memset(new->oldCipher, 0, BLOCK_SIZE); memcpy(new->IV, IV, IVlen); new->mode = mode; new->count=BLOCK_SIZE; /* stores how many bytes in new->oldCipher have been used */ return new; } static char ALG_Encrypt__doc__[] = "Encrypt the provided string of binary data."; static PyObject * ALG_Encrypt(ALGobject *self, PyObject *args) { unsigned char *buffer, *str; unsigned char temp[BLOCK_SIZE]; int i, j, len; PyObject *result; if (!PyArg_Parse(args, "s#", &str, &len)) return NULL; if (len==0) /* Handle empty string */ { return PyBytes_FromStringAndSize(NULL, 0); } if ( (len % BLOCK_SIZE) !=0 && (self->mode!=MODE_CFB) && (self->mode!=MODE_OFB) && (self->mode!=MODE_CTR)) { PyErr_Format(PyExc_ValueError, "Input strings must be " "a multiple of %i in length", BLOCK_SIZE); return NULL; } if (self->mode == MODE_CFB && (len % (self->segment_size/8) !=0)) { PyErr_Format(PyExc_ValueError, "Input strings must be a multiple of " "the segment size %i in length", self->segment_size/8); return NULL; } buffer=malloc(len); if (buffer==NULL) { PyErr_SetString(PyExc_MemoryError, "No memory available in " _MODULE_STRING " encrypt"); return NULL; } Py_BEGIN_ALLOW_THREADS; switch(self->mode) { case(MODE_ECB): for(i=0; i<len; i+=BLOCK_SIZE) { block_encrypt(&(self->st), str+i, buffer+i); } break; case(MODE_CBC): for(i=0; i<len; i+=BLOCK_SIZE) { for(j=0; j<BLOCK_SIZE; j++) { temp[j]=str[i+j]^self->IV[j]; } block_encrypt(&(self->st), temp, buffer+i); memcpy(self->IV, buffer+i, BLOCK_SIZE); } break; case(MODE_CFB): for(i=0; i<len; i+=self->segment_size/8) { block_encrypt(&(self->st), self->IV, temp); for (j=0; j<self->segment_size/8; j++) { buffer[i+j] = str[i+j] ^ temp[j]; } if (self->segment_size == BLOCK_SIZE * 8) { /* s == b: segment size is identical to the algorithm block size */ memcpy(self->IV, buffer + i, BLOCK_SIZE); } else if ((self->segment_size % 8) == 0) { int sz = self->segment_size/8; memmove(self->IV, self->IV + sz, BLOCK_SIZE-sz); memcpy(self->IV + BLOCK_SIZE - sz, buffer + i, sz); } else { /* segment_size is not a multiple of 8; currently this can't happen */ } } break; case(MODE_OFB): /* OFB mode is a stream cipher whose keystream is generated by encrypting the previous ciphered output. 
* - self->IV stores the current keystream block * - self->count indicates the current offset within the current keystream block * - str stores the input string * - buffer stores the output string * - len indicates the length of the input and output strings * - i indicates the current offset within the input and output strings * (len-i) is the number of bytes remaining to encrypt * (BLOCK_SIZE-self->count) is the number of bytes remaining in the current keystream block */ i = 0; while(i < len) { /* If we don't need more than what remains of the current keystream block, then just XOR it in */ if (len-i <= BLOCK_SIZE-self->count) { /* remaining_bytes_to_encrypt <= remaining_bytes_in_IV */ /* XOR until the input is used up */ for(j=0; j<(len-i); j++) { assert(i+j < len); assert(self->count+j < BLOCK_SIZE); buffer[i+j] = self->IV[self->count+j] ^ str[i+j]; } self->count += len-i; i = len; continue; } /* Use up the current keystream block */ for(j=0; j<BLOCK_SIZE-self->count; j++) { assert(i+j < len); assert(self->count+j < BLOCK_SIZE); buffer[i+j] = self->IV[self->count+j] ^ str[i+j]; } i += BLOCK_SIZE-self->count; self->count = BLOCK_SIZE; /* Generate a new keystream block */ block_encrypt(&(self->st), self->IV, temp); memcpy(self->IV, temp, BLOCK_SIZE); /* Move the pointer to the start of the keystream */ self->count = 0; } break; case(MODE_CTR): /* CTR mode is a stream cipher whose keystream is generated by encrypting unique counter values. * - self->counter points to the Counter callable, which is * responsible for generating keystream blocks * - self->count indicates the current offset within the current keystream block * - self->IV stores the current keystream block * - str stores the input string * - buffer stores the output string * - len indicates the length of the input and output strings * - i indicates the current offset within the input and output strings * - (len-i) is the number of bytes remaining to encrypt * - (BLOCK_SIZE-self->count) is the number of bytes remaining in the current keystream block */ i = 0; while (i < len) { /* If we don't need more than what remains of the current keystream block, then just XOR it in */ if (len-i <= BLOCK_SIZE-self->count) { /* remaining_bytes_to_encrypt <= remaining_bytes_in_IV */ /* XOR until the input is used up */ for(j=0; j<(len-i); j++) { assert(i+j < len); assert(self->count+j < BLOCK_SIZE); buffer[i+j] = (self->IV[self->count+j] ^= str[i+j]); } self->count += len-i; i = len; continue; } /* Use up the current keystream block */ for(j=0; j<BLOCK_SIZE-self->count; j++) { assert(i+j < len); assert(self->count+j < BLOCK_SIZE); buffer[i+j] = (self->IV[self->count+j] ^= str[i+j]); } i += BLOCK_SIZE-self->count; self->count = BLOCK_SIZE; /* Generate a new keystream block */ if (self->counter_shortcut) { /* CTR mode shortcut: If we're using Util.Counter, * bypass the normal Python function call mechanism * and manipulate the counter directly.
*/ PCT_CounterObject *ctr = (PCT_CounterObject *)(self->counter); if (ctr->carry && !ctr->allow_wraparound) { Py_BLOCK_THREADS; PyErr_SetString(PyExc_OverflowError, "counter wrapped without allow_wraparound"); free(buffer); return NULL; } if (ctr->buf_size != BLOCK_SIZE) { Py_BLOCK_THREADS; PyErr_Format(PyExc_TypeError, "CTR counter function returned " "string of length %zi, not %i", ctr->buf_size, BLOCK_SIZE); free(buffer); return NULL; } block_encrypt(&(self->st), (unsigned char *)ctr->val, self->IV); ctr->inc_func(ctr); } else { PyObject *ctr; Py_BLOCK_THREADS; ctr = PyObject_CallObject(self->counter, NULL); if (ctr == NULL) { free(buffer); return NULL; } if (!PyBytes_Check(ctr)) { PyErr_SetString(PyExc_TypeError, "CTR counter function didn't return a bytestring"); Py_DECREF(ctr); free(buffer); return NULL; } if (PyBytes_Size(ctr) != BLOCK_SIZE) { PyErr_Format(PyExc_TypeError, "CTR counter function returned " "bytestring not of length %i", BLOCK_SIZE); Py_DECREF(ctr); free(buffer); return NULL; } Py_UNBLOCK_THREADS; block_encrypt(&(self->st), (unsigned char *)PyBytes_AsString(ctr), self->IV); Py_BLOCK_THREADS; Py_DECREF(ctr); Py_UNBLOCK_THREADS; } /* Move the pointer to the start of the keystream block */ self->count = 0; } break; default: Py_BLOCK_THREADS; PyErr_Format(PyExc_SystemError, "Unknown ciphertext feedback mode %i; " "this shouldn't happen", self->mode); free(buffer); return NULL; } Py_END_ALLOW_THREADS; result=PyBytes_FromStringAndSize((char *) buffer, len); free(buffer); return(result); } static char ALG_Decrypt__doc__[] = "decrypt(string): Decrypt the provided string of binary data."; static PyObject * ALG_Decrypt(ALGobject *self, PyObject *args) { unsigned char *buffer, *str; unsigned char temp[BLOCK_SIZE]; int i, j, len; PyObject *result; /* CTR and OFB mode decryption is identical to encryption */ if (self->mode == MODE_CTR || self->mode == MODE_OFB) return ALG_Encrypt(self, args); if (!PyArg_Parse(args, "s#", &str, &len)) return NULL; if (len==0) /* Handle empty string */ { return PyBytes_FromStringAndSize(NULL, 0); } if ( (len % BLOCK_SIZE) !=0 && (self->mode!=MODE_CFB)) { PyErr_Format(PyExc_ValueError, "Input strings must be " "a multiple of %i in length", BLOCK_SIZE); return NULL; } if (self->mode == MODE_CFB && (len % (self->segment_size/8) !=0)) { PyErr_Format(PyExc_ValueError, "Input strings must be a multiple of " "the segment size %i in length", self->segment_size/8); return NULL; } buffer=malloc(len); if (buffer==NULL) { PyErr_SetString(PyExc_MemoryError, "No memory available in " _MODULE_STRING " decrypt"); return NULL; } Py_BEGIN_ALLOW_THREADS; switch(self->mode) { case(MODE_ECB): for(i=0; i<len; i+=BLOCK_SIZE) { block_decrypt(&(self->st), str+i, buffer+i); } break; case(MODE_CBC): for(i=0; i<len; i+=BLOCK_SIZE) { memcpy(self->oldCipher, self->IV, BLOCK_SIZE); block_decrypt(&(self->st), str+i, temp); for(j=0; j<BLOCK_SIZE; j++) { buffer[i+j]=temp[j]^self->IV[j]; self->IV[j]=str[i+j]; } } break; case(MODE_CFB): for(i=0; i<len; i+=self->segment_size/8) { block_encrypt(&(self->st), self->IV, temp); for (j=0; j<self->segment_size/8; j++) { buffer[i+j] = str[i+j]^temp[j]; } if (self->segment_size == BLOCK_SIZE * 8) { /* s == b: segment size is identical to the algorithm block size */ memcpy(self->IV, str + i, BLOCK_SIZE); } else if ((self->segment_size % 8) == 0) { int sz = self->segment_size/8; memmove(self->IV, self->IV + sz, BLOCK_SIZE-sz); memcpy(self->IV + BLOCK_SIZE - sz, str + i, sz); } else { /* segment_size is not a multiple of 8; currently this can't 
happen */ } } break; default: Py_BLOCK_THREADS; PyErr_Format(PyExc_SystemError, "Unknown ciphertext feedback mode %i; " "this shouldn't happen", self->mode); free(buffer); return NULL; } Py_END_ALLOW_THREADS; result=PyBytes_FromStringAndSize((char *) buffer, len); free(buffer); return(result); } /* ALG object methods */ static PyMethodDef ALGmethods[] = { {"encrypt", (PyCFunction) ALG_Encrypt, METH_O, ALG_Encrypt__doc__}, {"decrypt", (PyCFunction) ALG_Decrypt, METH_O, ALG_Decrypt__doc__}, {NULL, NULL} /* sentinel */ }; static int ALGsetattr(PyObject *ptr, char *name, PyObject *v) { ALGobject *self=(ALGobject *)ptr; if (strcmp(name, "IV") != 0) { PyErr_Format(PyExc_AttributeError, "non-existent block cipher object attribute '%s'", name); return -1; } if (v==NULL) { PyErr_SetString(PyExc_AttributeError, "Can't delete IV attribute of block cipher object"); return -1; } if (!PyBytes_Check(v)) { PyErr_SetString(PyExc_TypeError, "IV attribute of block cipher object must be bytestring"); return -1; } if (PyBytes_Size(v)!=BLOCK_SIZE) { PyErr_Format(PyExc_ValueError, _MODULE_STRING " IV must be %i bytes long", BLOCK_SIZE); return -1; } memcpy(self->IV, PyBytes_AsString(v), BLOCK_SIZE); return 0; } static PyObject * ALGgetattro(PyObject *s, PyObject *attr) { ALGobject *self = (ALGobject*)s; if (!PyString_Check(attr)) goto generic; if (PyString_CompareWithASCIIString(attr, "IV") == 0) { return(PyBytes_FromStringAndSize((char *) self->IV, BLOCK_SIZE)); } if (PyString_CompareWithASCIIString(attr, "mode") == 0) { return(PyInt_FromLong((long)(self->mode))); } if (PyString_CompareWithASCIIString(attr, "block_size") == 0) { return PyInt_FromLong(BLOCK_SIZE); } if (PyString_CompareWithASCIIString(attr, "key_size") == 0) { return PyInt_FromLong(KEY_SIZE); } generic: #if PYTHON_API_VERSION >= 1011 /* Python 2.2 and later */ return PyObject_GenericGetAttr(s, attr); #else if (PyString_Check(attr) < 0) { PyErr_SetObject(PyExc_AttributeError, attr); return NULL; } return Py_FindMethod(ALGmethods, (PyObject *)self, PyString_AsString(attr)); #endif } /* List of functions defined in the module */ static struct PyMethodDef modulemethods[] = { {"new", (PyCFunction) ALGnew, METH_VARARGS|METH_KEYWORDS, ALGnew__doc__}, {NULL, NULL} /* sentinel */ }; static PyTypeObject ALGtype = { PyVarObject_HEAD_INIT(NULL, 0) /* deferred type init for compilation on Windows, type will be filled in at runtime */ _MODULE_STRING, /*tp_name*/ sizeof(ALGobject), /*tp_size*/ 0, /*tp_itemsize*/ /* methods */ (destructor) ALGdealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ ALGsetattr, /*tp_setattr*/ 0, /*tp_compare*/ (reprfunc) 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence */ 0, /*tp_as_mapping */ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ ALGgetattro, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ #if PYTHON_API_VERSION >= 1011 /* Python 2.2 and later */ 0, /*tp_iter*/ 0, /*tp_iternext*/ ALGmethods, /*tp_methods*/ #endif }; #ifdef IS_PY3K static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "Crypto.Cipher." 
_MODULE_STRING, NULL, -1, modulemethods, NULL, NULL, NULL, NULL }; #endif /* Initialization function for the module */ PyMODINIT_FUNC _MODULE_NAME (void) { PyObject *m = NULL; PyObject *abiver = NULL; PyObject *__all__ = NULL; if (PyType_Ready(&ALGtype) < 0) goto errout; /* Create the module and add the functions */ #ifdef IS_PY3K m = PyModule_Create(&moduledef); #else m = Py_InitModule("Crypto.Cipher." _MODULE_STRING, modulemethods); #endif if (m == NULL) goto errout; /* Add the type object to the module (using the name of the module itself), * so that its methods docstrings are discoverable by introspection tools. */ PyObject_SetAttrString(m, _MODULE_STRING, (PyObject *)&ALGtype); /* Add some symbolic constants to the module */ PyModule_AddIntConstant(m, "MODE_ECB", MODE_ECB); PyModule_AddIntConstant(m, "MODE_CBC", MODE_CBC); PyModule_AddIntConstant(m, "MODE_CFB", MODE_CFB); PyModule_AddIntConstant(m, "MODE_PGP", MODE_PGP); /** Vestigial **/ PyModule_AddIntConstant(m, "MODE_OFB", MODE_OFB); PyModule_AddIntConstant(m, "MODE_CTR", MODE_CTR); PyModule_AddIntConstant(m, "block_size", BLOCK_SIZE); PyModule_AddIntConstant(m, "key_size", KEY_SIZE); /* Import CounterBE and CounterLE from the _counter module */ Py_CLEAR(_counter_module); _counter_module = PyImport_ImportModule("Crypto.Util._counter"); if (_counter_module == NULL) goto errout; PCT_CounterBEType = (PyTypeObject *)PyObject_GetAttrString(_counter_module, "CounterBE"); PCT_CounterLEType = (PyTypeObject *)PyObject_GetAttrString(_counter_module, "CounterLE"); /* Simple ABI version check in case the user doesn't re-compile all of * the modules during an upgrade. */ abiver = PyObject_GetAttrString(_counter_module, "_PCT_CTR_ABI_VERSION"); if (PCT_CounterBEType == NULL || PyType_Check((PyObject *)PCT_CounterBEType) < 0 || PCT_CounterLEType == NULL || PyType_Check((PyObject *)PCT_CounterLEType) < 0 || abiver == NULL || PyInt_CheckExact(abiver) < 0 || PyInt_AS_LONG(abiver) != PCT_CTR_ABI_VERSION) { PyErr_SetString(PyExc_ImportError, "Crypto.Util._counter ABI mismatch. Was PyCrypto incorrectly compiled?"); goto errout; } /* Create __all__ (to help generate documentation) */ __all__ = PyList_New(10); if (__all__ == NULL) goto errout; PyList_SetItem(__all__, 0, PyString_FromString(_MODULE_STRING)); /* This is the ALGType object */ PyList_SetItem(__all__, 1, PyString_FromString("new")); PyList_SetItem(__all__, 2, PyString_FromString("MODE_ECB")); PyList_SetItem(__all__, 3, PyString_FromString("MODE_CBC")); PyList_SetItem(__all__, 4, PyString_FromString("MODE_CFB")); PyList_SetItem(__all__, 5, PyString_FromString("MODE_PGP")); PyList_SetItem(__all__, 6, PyString_FromString("MODE_OFB")); PyList_SetItem(__all__, 7, PyString_FromString("MODE_CTR")); PyList_SetItem(__all__, 8, PyString_FromString("block_size")); PyList_SetItem(__all__, 9, PyString_FromString("key_size")); PyObject_SetAttrString(m, "__all__", __all__); out: /* Final error check */ if (m == NULL && !PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "can't initialize module"); goto errout; } /* Free local objects here */ Py_CLEAR(abiver); Py_CLEAR(__all__); /* Return */ #ifdef IS_PY3K return m; #else return; #endif errout: /* Free the module and other global objects here */ Py_CLEAR(m); Py_CLEAR(_counter_module); Py_CLEAR(PCT_CounterBEType); Py_CLEAR(PCT_CounterLEType); goto out; } /* vim:set ts=4 sw=4 sts=0 noexpandtab: */
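/*
 * Illustrative sketch (not part of the original module): the OFB and CTR
 * encryption loops above share one pattern -- XOR the input with whatever
 * remains of the current keystream block, then refill the block and reset
 * the offset.  A minimal standalone version, with a hypothetical refill
 * callback standing in for block_encrypt() and the counter machinery,
 * might look like this:
 */
static void
xor_with_keystream(unsigned char *out, const unsigned char *in, int len,
                   unsigned char *ks, int *count, int block_size,
                   void (*refill)(unsigned char *ks))
{
	int i = 0, j, n;

	while (i < len) {
		if (*count == block_size) {
			/* current keystream block is used up: make the next one */
			refill(ks);
			*count = 0;
		}
		/* XOR as many bytes as the input and the block both allow */
		n = block_size - *count;
		if (n > len - i)
			n = len - i;
		for (j = 0; j < n; j++)
			out[i + j] = in[i + j] ^ ks[*count + j];
		i += n;
		*count += n;
	}
}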
./CrossVul/dataset_final_sorted/CWE-119/c/good_5871_1
crossvul-cpp_data_bad_2021_0
/* * fs/cifs/file.c * * vfs operations that deal with files * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) * Jeremy Allison (jra@samba.org) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/backing-dev.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/task_io_accounting_ops.h> #include <linux/delay.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/swap.h> #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "fscache.h" static inline int cifs_convert_flags(unsigned int flags) { if ((flags & O_ACCMODE) == O_RDONLY) return GENERIC_READ; else if ((flags & O_ACCMODE) == O_WRONLY) return GENERIC_WRITE; else if ((flags & O_ACCMODE) == O_RDWR) { /* GENERIC_ALL is too much permission to request can cause unnecessary access denied on create */ /* return GENERIC_ALL; */ return (GENERIC_READ | GENERIC_WRITE); } return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA); } static u32 cifs_posix_convert_flags(unsigned int flags) { u32 posix_flags = 0; if ((flags & O_ACCMODE) == O_RDONLY) posix_flags = SMB_O_RDONLY; else if ((flags & O_ACCMODE) == O_WRONLY) posix_flags = SMB_O_WRONLY; else if ((flags & O_ACCMODE) == O_RDWR) posix_flags = SMB_O_RDWR; if (flags & O_CREAT) { posix_flags |= SMB_O_CREAT; if (flags & O_EXCL) posix_flags |= SMB_O_EXCL; } else if (flags & O_EXCL) cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. 
Ignoring O_EXCL\n", current->comm, current->tgid); if (flags & O_TRUNC) posix_flags |= SMB_O_TRUNC; /* be safe and imply O_SYNC for O_DSYNC */ if (flags & O_DSYNC) posix_flags |= SMB_O_SYNC; if (flags & O_DIRECTORY) posix_flags |= SMB_O_DIRECTORY; if (flags & O_NOFOLLOW) posix_flags |= SMB_O_NOFOLLOW; if (flags & O_DIRECT) posix_flags |= SMB_O_DIRECT; return posix_flags; } static inline int cifs_get_disposition(unsigned int flags) { if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) return FILE_CREATE; else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) return FILE_OVERWRITE_IF; else if ((flags & O_CREAT) == O_CREAT) return FILE_OPEN_IF; else if ((flags & O_TRUNC) == O_TRUNC) return FILE_OVERWRITE; else return FILE_OPEN; } int cifs_posix_open(char *full_path, struct inode **pinode, struct super_block *sb, int mode, unsigned int f_flags, __u32 *poplock, __u16 *pnetfid, unsigned int xid) { int rc; FILE_UNIX_BASIC_INFO *presp_data; __u32 posix_flags = 0; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fattr fattr; struct tcon_link *tlink; struct cifs_tcon *tcon; cifs_dbg(FYI, "posix open %s\n", full_path); presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); if (presp_data == NULL) return -ENOMEM; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); goto posix_open_ret; } tcon = tlink_tcon(tlink); mode &= ~current_umask(); posix_flags = cifs_posix_convert_flags(f_flags); rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, poplock, full_path, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); cifs_put_tlink(tlink); if (rc) goto posix_open_ret; if (presp_data->Type == cpu_to_le32(-1)) goto posix_open_ret; /* open ok, caller does qpathinfo */ if (!pinode) goto posix_open_ret; /* caller does not need info */ cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb); /* get new inode and set it up */ if (*pinode == NULL) { cifs_fill_uniqueid(sb, &fattr); *pinode = cifs_iget(sb, &fattr); if (!*pinode) { rc = -ENOMEM; goto posix_open_ret; } } else { cifs_fattr_to_inode(*pinode, &fattr); } posix_open_ret: kfree(presp_data); return rc; } static int cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock, struct cifs_fid *fid, unsigned int xid) { int rc; int desired_access; int disposition; int create_options = CREATE_NOT_DIR; FILE_ALL_INFO *buf; struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; if (!server->ops->open) return -ENOSYS; desired_access = cifs_convert_flags(f_flags); /********************************************************************* * open flag mapping table: * * POSIX Flag CIFS Disposition * ---------- ---------------- * O_CREAT FILE_OPEN_IF * O_CREAT | O_EXCL FILE_CREATE * O_CREAT | O_TRUNC FILE_OVERWRITE_IF * O_TRUNC FILE_OVERWRITE * none of the above FILE_OPEN * * Note that there is no direct POSIX match for the disposition * FILE_SUPERSEDE (i.e. create whether or not the file exists); * O_CREAT | O_TRUNC is similar, but it truncates the existing * file rather than creating a new one as FILE_SUPERSEDE does * (which uses the attributes / metadata passed in on the open call). * * O_SYNC is a reasonable match to the CIFS writethrough flag, * and the read/write flags match reasonably. O_LARGEFILE * is irrelevant because largefile support is always used * by this client.
Flags O_APPEND, O_DIRECT, O_DIRECTORY, * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation *********************************************************************/ disposition = cifs_get_disposition(f_flags); /* BB pass O_SYNC flag through on file attributes .. BB */ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (!buf) return -ENOMEM; if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; oparms.tcon = tcon; oparms.cifs_sb = cifs_sb; oparms.desired_access = desired_access; oparms.create_options = create_options; oparms.disposition = disposition; oparms.path = full_path; oparms.fid = fid; oparms.reconnect = false; rc = server->ops->open(xid, &oparms, oplock, buf); if (rc) goto out; if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, xid, fid); out: kfree(buf); return rc; } static bool cifs_has_mand_locks(struct cifsInodeInfo *cinode) { struct cifs_fid_locks *cur; bool has_locks = false; down_read(&cinode->lock_sem); list_for_each_entry(cur, &cinode->llist, llist) { if (!list_empty(&cur->locks)) { has_locks = true; break; } } up_read(&cinode->lock_sem); return has_locks; } struct cifsFileInfo * cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifsFileInfo *cfile; struct cifs_fid_locks *fdlocks; struct cifs_tcon *tcon = tlink_tcon(tlink); struct TCP_Server_Info *server = tcon->ses->server; cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); if (cfile == NULL) return cfile; fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL); if (!fdlocks) { kfree(cfile); return NULL; } INIT_LIST_HEAD(&fdlocks->locks); fdlocks->cfile = cfile; cfile->llist = fdlocks; down_write(&cinode->lock_sem); list_add(&fdlocks->llist, &cinode->llist); up_write(&cinode->lock_sem); cfile->count = 1; cfile->pid = current->tgid; cfile->uid = current_fsuid(); cfile->dentry = dget(dentry); cfile->f_flags = file->f_flags; cfile->invalidHandle = false; cfile->tlink = cifs_get_tlink(tlink); INIT_WORK(&cfile->oplock_break, cifs_oplock_break); mutex_init(&cfile->fh_mutex); cifs_sb_active(inode->i_sb); /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. */ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n"); oplock = 0; } spin_lock(&cifs_file_list_lock); if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) oplock = fid->pending_open->oplock; list_del(&fid->pending_open->olist); fid->purge_cache = false; server->ops->set_fid(cfile, fid, oplock); list_add(&cfile->tlist, &tcon->openFileList); /* if readable file instance put first in list*/ if (file->f_mode & FMODE_READ) list_add(&cfile->flist, &cinode->openFileList); else list_add_tail(&cfile->flist, &cinode->openFileList); spin_unlock(&cifs_file_list_lock); if (fid->purge_cache) cifs_invalidate_mapping(inode); file->private_data = cfile; return cfile; } struct cifsFileInfo * cifsFileInfo_get(struct cifsFileInfo *cifs_file) { spin_lock(&cifs_file_list_lock); cifsFileInfo_get_locked(cifs_file); spin_unlock(&cifs_file_list_lock); return cifs_file; } /* * Release a reference on the file private data. This may involve closing * the filehandle out on the server. 
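 * On the final put it also tears down any cached byte-range locks and
 * removes the handle from the inode and tcon open-file lists.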
Must be called without holding * cifs_file_list_lock. */ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) { struct inode *inode = cifs_file->dentry->d_inode; struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); struct super_block *sb = inode->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; spin_lock(&cifs_file_list_lock); if (--cifs_file->count > 0) { spin_unlock(&cifs_file_list_lock); return; } if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); /* store open in pending opens to make sure we don't miss lease break */ cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open); /* remove it from the lists */ list_del(&cifs_file->flist); list_del(&cifs_file->tlist); if (list_empty(&cifsi->openFileList)) { cifs_dbg(FYI, "closing last open instance for inode %p\n", cifs_file->dentry->d_inode); /* * In strict cache mode we need to invalidate the mapping on the last * close because it may cause an error when we open this file * again and get at least level II oplock. */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) CIFS_I(inode)->invalid_mapping = true; cifs_set_oplock_level(cifsi, 0); } spin_unlock(&cifs_file_list_lock); cancel_work_sync(&cifs_file->oplock_break); if (!tcon->need_reconnect && !cifs_file->invalidHandle) { struct TCP_Server_Info *server = tcon->ses->server; unsigned int xid; xid = get_xid(); if (server->ops->close) server->ops->close(xid, tcon, &cifs_file->fid); _free_xid(xid); } cifs_del_pending_open(&open); /* * Delete any outstanding lock records. We'll lose them when the file * is closed anyway. */ down_write(&cifsi->lock_sem); list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); } list_del(&cifs_file->llist->llist); kfree(cifs_file->llist); up_write(&cifsi->lock_sem); cifs_put_tlink(cifs_file->tlink); dput(cifs_file->dentry); cifs_sb_deactive(sb); kfree(cifs_file); } int cifs_open(struct inode *inode, struct file *file) { int rc = -EACCES; unsigned int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *server; struct cifs_tcon *tcon; struct tcon_link *tlink; struct cifsFileInfo *cfile = NULL; char *full_path = NULL; bool posix_open_ok = false; struct cifs_fid fid; struct cifs_pending_open open; xid = get_xid(); cifs_sb = CIFS_SB(inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { free_xid(xid); return PTR_ERR(tlink); } tcon = tlink_tcon(tlink); server = tcon->ses->server; full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; goto out; } cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", inode, file->f_flags, full_path); if (server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; if (!tcon->broken_posix_open && tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, &inode, inode->i_sb, cifs_sb->mnt_file_mode /* ignored */, file->f_flags, &oplock, &fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix open succeeded\n"); posix_open_ok = true; } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { if (tcon->ses->serverNOS) cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support.
Check if server update available.\n", tcon->ses->serverName, tcon->ses->serverNOS); tcon->broken_posix_open = true; } else if ((rc != -EIO) && (rc != -EREMOTE) && (rc != -EOPNOTSUPP)) /* path not found or net err */ goto out; /* * Else fallthrough to retry open the old way on network i/o * or DFS errors. */ } if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); cifs_add_pending_open(&fid, tlink, &open); if (!posix_open_ok) { if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid, xid); if (rc) { cifs_del_pending_open(&open); goto out; } } cfile = cifs_new_fileinfo(&fid, file, tlink, oplock); if (cfile == NULL) { if (server->ops->close) server->ops->close(xid, tcon, &fid); cifs_del_pending_open(&open); rc = -ENOMEM; goto out; } cifs_fscache_set_inode_cookie(inode, file); if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { /* * Time to set mode which we can not set earlier due to * problems creating new read-only files. */ struct cifs_unix_set_info_args args = { .mode = inode->i_mode, .uid = INVALID_UID, /* no change */ .gid = INVALID_GID, /* no change */ .ctime = NO_CHANGE_64, .atime = NO_CHANGE_64, .mtime = NO_CHANGE_64, .device = 0, }; CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, cfile->pid); } out: kfree(full_path); free_xid(xid); cifs_put_tlink(tlink); return rc; } static int cifs_push_posix_locks(struct cifsFileInfo *cfile); /* * Try to reacquire byte range locks that were released when session * to server was lost. */ static int cifs_relock_file(struct cifsFileInfo *cfile) { struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; down_read(&cinode->lock_sem); if (cinode->can_cache_brlcks) { /* can cache locks - no need to relock */ up_read(&cinode->lock_sem); return rc; } if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else rc = tcon->ses->server->ops->push_mand_locks(cfile); up_read(&cinode->lock_sem); return rc; } static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) { int rc = -EACCES; unsigned int xid; __u32 oplock; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsInodeInfo *cinode; struct inode *inode; char *full_path = NULL; int desired_access; int disposition = FILE_OPEN; int create_options = CREATE_NOT_DIR; struct cifs_open_parms oparms; xid = get_xid(); mutex_lock(&cfile->fh_mutex); if (!cfile->invalidHandle) { mutex_unlock(&cfile->fh_mutex); rc = 0; free_xid(xid); return rc; } inode = cfile->dentry->d_inode; cifs_sb = CIFS_SB(inode->i_sb); tcon = tlink_tcon(cfile->tlink); server = tcon->ses->server; /* * Can not grab rename sem here because various ops, including those * that already have the rename sem can end up causing writepage to get * called and if the server was down that means we end up here, and we * can never tell if the caller already has the rename_sem. 
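 * The path is therefore rebuilt from the dentry below without taking
 * that semaphore.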
*/ full_path = build_path_from_dentry(cfile->dentry); if (full_path == NULL) { rc = -ENOMEM; mutex_unlock(&cfile->fh_mutex); free_xid(xid); return rc; } cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n", inode, cfile->f_flags, full_path); if (tcon->ses->server->oplocks) oplock = REQ_OPLOCK; else oplock = 0; if (tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* * O_CREAT, O_EXCL and O_TRUNC already had their effect on the * original open. Must mask them off for a reopen. */ unsigned int oflags = cfile->f_flags & ~(O_CREAT | O_EXCL | O_TRUNC); rc = cifs_posix_open(full_path, NULL, inode->i_sb, cifs_sb->mnt_file_mode /* ignored */, oflags, &oplock, &cfile->fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix reopen succeeded\n"); oparms.reconnect = true; goto reopen_success; } /* * fallthrough to retry open the old way on errors, especially * in the reconnect path it is important to retry hard */ } desired_access = cifs_convert_flags(cfile->f_flags); if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT; if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &cfile->fid); oparms.tcon = tcon; oparms.cifs_sb = cifs_sb; oparms.desired_access = desired_access; oparms.create_options = create_options; oparms.disposition = disposition; oparms.path = full_path; oparms.fid = &cfile->fid; oparms.reconnect = true; /* * Can not refresh inode by passing in file_info buf to be returned by * ops->open and then calling get_inode_info with returned buf since * file might have write behind data that needs to be flushed and server * version of file size can be stale. If we knew for sure that inode was * not dirty locally we could do this. */ rc = server->ops->open(xid, &oparms, &oplock, NULL); if (rc == -ENOENT && oparms.reconnect == false) { /* durable handle timeout is expired - open the file again */ rc = server->ops->open(xid, &oparms, &oplock, NULL); /* indicate that we need to relock the file */ oparms.reconnect = true; } if (rc) { mutex_unlock(&cfile->fh_mutex); cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc); cifs_dbg(FYI, "oplock: %d\n", oplock); goto reopen_error_exit; } reopen_success: cfile->invalidHandle = false; mutex_unlock(&cfile->fh_mutex); cinode = CIFS_I(inode); if (can_flush) { rc = filemap_write_and_wait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, xid); else rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, xid, NULL); } /* * Else we are writing out data to server already and could deadlock if * we tried to flush data, and since we do not know if we have data that * would invalidate the current end of file on the server we can not go * to the server to get the new inode info. 
*/ server->ops->set_fid(cfile, &cfile->fid, oplock); if (oparms.reconnect) cifs_relock_file(cfile); reopen_error_exit: kfree(full_path); free_xid(xid); return rc; } int cifs_close(struct inode *inode, struct file *file) { if (file->private_data != NULL) { cifsFileInfo_put(file->private_data); file->private_data = NULL; } /* return code from the ->release op is always ignored */ return 0; } int cifs_closedir(struct inode *inode, struct file *file) { int rc = 0; unsigned int xid; struct cifsFileInfo *cfile = file->private_data; struct cifs_tcon *tcon; struct TCP_Server_Info *server; char *buf; cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode); if (cfile == NULL) return rc; xid = get_xid(); tcon = tlink_tcon(cfile->tlink); server = tcon->ses->server; cifs_dbg(FYI, "Freeing private data in close dir\n"); spin_lock(&cifs_file_list_lock); if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) { cfile->invalidHandle = true; spin_unlock(&cifs_file_list_lock); if (server->ops->close_dir) rc = server->ops->close_dir(xid, tcon, &cfile->fid); else rc = -ENOSYS; cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc); /* not much we can do if it fails anyway, ignore rc */ rc = 0; } else spin_unlock(&cifs_file_list_lock); buf = cfile->srch_inf.ntwrk_buf_start; if (buf) { cifs_dbg(FYI, "closedir free smb buf in srch struct\n"); cfile->srch_inf.ntwrk_buf_start = NULL; if (cfile->srch_inf.smallBuf) cifs_small_buf_release(buf); else cifs_buf_release(buf); } cifs_put_tlink(cfile->tlink); kfree(file->private_data); file->private_data = NULL; /* BB can we lock the filestruct while this is going on? */ free_xid(xid); return rc; } static struct cifsLockInfo * cifs_lock_init(__u64 offset, __u64 length, __u8 type) { struct cifsLockInfo *lock = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); if (!lock) return lock; lock->offset = offset; lock->length = length; lock->type = type; lock->pid = current->tgid; INIT_LIST_HEAD(&lock->blist); init_waitqueue_head(&lock->block_q); return lock; } void cifs_del_lock_waiters(struct cifsLockInfo *lock) { struct cifsLockInfo *li, *tmp; list_for_each_entry_safe(li, tmp, &lock->blist, blist) { list_del_init(&li->blist); wake_up(&li->block_q); } } #define CIFS_LOCK_OP 0 #define CIFS_READ_OP 1 #define CIFS_WRITE_OP 2 /* @rw_check : 0 - no op, 1 - read, 2 - write */ static bool cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset, __u64 length, __u8 type, struct cifsFileInfo *cfile, struct cifsLockInfo **conf_lock, int rw_check) { struct cifsLockInfo *li; struct cifsFileInfo *cur_cfile = fdlocks->cfile; struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; list_for_each_entry(li, &fdlocks->locks, llist) { if (offset + length <= li->offset || offset >= li->offset + li->length) continue; if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid && server->ops->compare_fids(cfile, cur_cfile)) { /* shared lock prevents write op through the same fid */ if (!(li->type & server->vals->shared_lock_type) || rw_check != CIFS_WRITE_OP) continue; } if ((type & server->vals->shared_lock_type) && ((server->ops->compare_fids(cfile, cur_cfile) && current->tgid == li->pid) || type == li->type)) continue; if (conf_lock) *conf_lock = li; return true; } return false; } bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u8 type, struct cifsLockInfo **conf_lock, int rw_check) { bool rc = false; struct cifs_fid_locks *cur; struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); list_for_each_entry(cur, &cinode->llist, 
llist) { rc = cifs_find_fid_lock_conflict(cur, offset, length, type, cfile, conf_lock, rw_check); if (rc) break; } return rc; } /* * Check if there is another lock that prevents us from setting the lock (mandatory * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise. */ static int cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u8 type, struct file_lock *flock) { int rc = 0; struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; bool exist; down_read(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, offset, length, type, &conf_lock, CIFS_LOCK_OP); if (exist) { flock->fl_start = conf_lock->offset; flock->fl_end = conf_lock->offset + conf_lock->length - 1; flock->fl_pid = conf_lock->pid; if (conf_lock->type & server->vals->shared_lock_type) flock->fl_type = F_RDLCK; else flock->fl_type = F_WRLCK; } else if (!cinode->can_cache_brlcks) rc = 1; else flock->fl_type = F_UNLCK; up_read(&cinode->lock_sem); return rc; } static void cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) { struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); down_write(&cinode->lock_sem); list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); } /* * Set the byte-range lock (mandatory style). Returns: * 1) 0, if we set the lock and don't need to request to the server; * 2) 1, if no locks prevent us but we need to request to the server; * 3) -EACCES, if there is a lock that prevents us and wait is false. */ static int cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, bool wait) { struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); bool exist; int rc = 0; try_again: exist = false; down_write(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, lock->type, &conf_lock, CIFS_LOCK_OP); if (!exist && cinode->can_cache_brlcks) { list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); return rc; } if (!exist) rc = 1; else if (!wait) rc = -EACCES; else { list_add_tail(&lock->blist, &conf_lock->blist); up_write(&cinode->lock_sem); rc = wait_event_interruptible(lock->block_q, (lock->blist.prev == &lock->blist) && (lock->blist.next == &lock->blist)); if (!rc) goto try_again; down_write(&cinode->lock_sem); list_del_init(&lock->blist); } up_write(&cinode->lock_sem); return rc; } /* * Check if there is another lock that prevents us from setting the lock (posix * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise. */ static int cifs_posix_lock_test(struct file *file, struct file_lock *flock) { int rc = 0; struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); unsigned char saved_type = flock->fl_type; if ((flock->fl_flags & FL_POSIX) == 0) return 1; down_read(&cinode->lock_sem); posix_test_lock(file, flock); if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) { flock->fl_type = saved_type; rc = 1; } up_read(&cinode->lock_sem); return rc; } /* * Set the byte-range lock (posix style).
Returns: * 1) 0, if we set the lock and don't need to request to the server; * 2) 1, if we need to request to the server; * 3) <0, if the error occurs while setting the lock. */ static int cifs_posix_lock_set(struct file *file, struct file_lock *flock) { struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); int rc = 1; if ((flock->fl_flags & FL_POSIX) == 0) return rc; try_again: down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; } rc = posix_lock_file(file, flock, NULL); up_write(&cinode->lock_sem); if (rc == FILE_LOCK_DEFERRED) { rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next); if (!rc) goto try_again; posix_unblock_lock(flock); } return rc; } int cifs_push_mandatory_locks(struct cifsFileInfo *cfile) { unsigned int xid; int rc = 0, stored_rc; struct cifsLockInfo *li, *tmp; struct cifs_tcon *tcon; unsigned int num, max_num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; int types[] = {LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; int i; xid = get_xid(); tcon = tlink_tcon(cfile->tlink); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it for zero before using. */ max_buf = tcon->ses->server->maxBuf; if (!max_buf) { free_xid(xid); return -EINVAL; } max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) { free_xid(xid); return -ENOMEM; } for (i = 0; i < 2; i++) { cur = buf; num = 0; list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (li->type != types[i]) continue; cur->Pid = cpu_to_le16(li->pid); cur->LengthLow = cpu_to_le32((u32)li->length); cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); cur->OffsetLow = cpu_to_le32((u32)li->offset); cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); if (++num == max_num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, (__u8)li->type, 0, num, buf); if (stored_rc) rc = stored_rc; cur = buf; num = 0; } else cur++; } if (num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, (__u8)types[i], 0, num, buf); if (stored_rc) rc = stored_rc; } } kfree(buf); free_xid(xid); return rc; } /* copied from fs/locks.c with a name change */ #define cifs_for_each_lock(inode, lockp) \ for (lockp = &inode->i_flock; *lockp != NULL; \ lockp = &(*lockp)->fl_next) struct lock_to_push { struct list_head llist; __u64 offset; __u64 length; __u32 pid; __u16 netfid; __u8 type; }; static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { struct inode *inode = cfile->dentry->d_inode; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock, **before; unsigned int count = 0, i = 0; int rc = 0, xid, type; struct list_head locks_to_send, *el; struct lock_to_push *lck, *tmp; __u64 length; xid = get_xid(); spin_lock(&inode->i_lock); cifs_for_each_lock(inode, before) { if ((*before)->fl_flags & FL_POSIX) count++; } spin_unlock(&inode->i_lock); INIT_LIST_HEAD(&locks_to_send); /* * Allocating count locks is enough because no FL_POSIX locks can be * added to the list while we are holding cinode->lock_sem that * protects locking operations of this inode. 
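 * The allocations themselves are done up front, outside i_lock,
 * because kmalloc(GFP_KERNEL) may sleep; the structures are filled in
 * on a second pass under the spinlock.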
*/ for (; i < count; i++) { lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); if (!lck) { rc = -ENOMEM; goto err_out; } list_add_tail(&lck->llist, &locks_to_send); } el = locks_to_send.next; spin_lock(&inode->i_lock); cifs_for_each_lock(inode, before) { flock = *before; if ((flock->fl_flags & FL_POSIX) == 0) continue; if (el == &locks_to_send) { /* * The list ended. We don't have enough allocated * structures - something is really wrong. */ cifs_dbg(VFS, "Can't push all brlocks!\n"); break; } length = 1 + flock->fl_end - flock->fl_start; if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) type = CIFS_RDLCK; else type = CIFS_WRLCK; lck = list_entry(el, struct lock_to_push, llist); lck->pid = flock->fl_pid; lck->netfid = cfile->fid.netfid; lck->length = length; lck->type = type; lck->offset = flock->fl_start; el = el->next; } spin_unlock(&inode->i_lock); list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { int stored_rc; stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid, lck->offset, lck->length, NULL, lck->type, 0); if (stored_rc) rc = stored_rc; list_del(&lck->llist); kfree(lck); } out: free_xid(xid); return rc; err_out: list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { list_del(&lck->llist); kfree(lck); } goto out; } static int cifs_push_locks(struct cifsFileInfo *cfile) { struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; /* we are going to update can_cache_brlcks here - need a write access */ down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; } if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else rc = tcon->ses->server->ops->push_mand_locks(cfile); cinode->can_cache_brlcks = false; up_write(&cinode->lock_sem); return rc; } static void cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, bool *wait_flag, struct TCP_Server_Info *server) { if (flock->fl_flags & FL_POSIX) cifs_dbg(FYI, "Posix\n"); if (flock->fl_flags & FL_FLOCK) cifs_dbg(FYI, "Flock\n"); if (flock->fl_flags & FL_SLEEP) { cifs_dbg(FYI, "Blocking lock\n"); *wait_flag = true; } if (flock->fl_flags & FL_ACCESS) cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n"); if (flock->fl_flags & FL_LEASE) cifs_dbg(FYI, "Lease on file - not implemented yet\n"); if (flock->fl_flags & (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE | FL_CLOSE))) cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags); *type = server->vals->large_lock_type; if (flock->fl_type == F_WRLCK) { cifs_dbg(FYI, "F_WRLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; } else if (flock->fl_type == F_UNLCK) { cifs_dbg(FYI, "F_UNLCK\n"); *type |= server->vals->unlock_lock_type; *unlock = 1; /* Check if unlock includes more than one lock range */ } else if (flock->fl_type == F_RDLCK) { cifs_dbg(FYI, "F_RDLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; } else if (flock->fl_type == F_EXLCK) { cifs_dbg(FYI, "F_EXLCK\n"); *type |= server->vals->exclusive_lock_type; *lock = 1; } else if (flock->fl_type == F_SHLCK) { cifs_dbg(FYI, "F_SHLCK\n"); *type |= server->vals->shared_lock_type; *lock = 1; } else cifs_dbg(FYI, "Unknown type of lock\n"); } static int cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, 
bool wait_flag, bool posix_lck, unsigned int xid) { int rc = 0; __u64 length = 1 + flock->fl_end - flock->fl_start; struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; __u16 netfid = cfile->fid.netfid; if (posix_lck) { int posix_lock_type; rc = cifs_posix_lock_test(file, flock); if (!rc) return rc; if (type & server->vals->shared_lock_type) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid, flock->fl_start, length, flock, posix_lock_type, wait_flag); return rc; } rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); if (!rc) return rc; /* BB we could chain these into one lock request BB */ rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 1, 0, false); if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 0, 1, false); flock->fl_type = F_UNLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); return 0; } if (type & server->vals->shared_lock_type) { flock->fl_type = F_WRLCK; return 0; } type &= ~server->vals->exclusive_lock_type; rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type | server->vals->shared_lock_type, 1, 0, false); if (rc == 0) { rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type | server->vals->shared_lock_type, 0, 1, false); flock->fl_type = F_RDLCK; if (rc != 0) cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", rc); } else flock->fl_type = F_WRLCK; return 0; } void cifs_move_llist(struct list_head *source, struct list_head *dest) { struct list_head *li, *tmp; list_for_each_safe(li, tmp, source) list_move(li, dest); } void cifs_free_llist(struct list_head *llist) { struct cifsLockInfo *li, *tmp; list_for_each_entry_safe(li, tmp, llist, llist) { cifs_del_lock_waiters(li); list_del(&li->llist); kfree(li); } } int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int xid) { int rc = 0, stored_rc; int types[] = {LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; unsigned int i; unsigned int max_num, num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); struct cifsLockInfo *li, *tmp; __u64 length = 1 + flock->fl_end - flock->fl_start; struct list_head tmp_llist; INIT_LIST_HEAD(&tmp_llist); /* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it for zero before using. */ max_buf = tcon->ses->server->maxBuf; if (!max_buf) return -EINVAL; max_num = (max_buf - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE); buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) return -ENOMEM; down_write(&cinode->lock_sem); for (i = 0; i < 2; i++) { cur = buf; num = 0; list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset || (flock->fl_start + length) < (li->offset + li->length)) continue; if (current->tgid != li->pid) continue; if (types[i] != li->type) continue; if (cinode->can_cache_brlcks) { /* * We can cache brlock requests - simply remove * a lock from the file's list. 
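 * (While can_cache_brlcks is set the lock was never sent to the
 * server, so dropping the local record is all that is needed.)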
*/ list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); continue; } cur->Pid = cpu_to_le16(li->pid); cur->LengthLow = cpu_to_le32((u32)li->length); cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); cur->OffsetLow = cpu_to_le32((u32)li->offset); cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); /* * We need to save a lock here to let us add it again to * the file's list if the unlock range request fails on * the server. */ list_move(&li->llist, &tmp_llist); if (++num == max_num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, li->type, num, 0, buf); if (stored_rc) { /* * We failed on the unlock range * request - add all locks from the tmp * list to the head of the file's list. */ cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else /* * The unlock range request succeeded - * free the tmp list. */ cifs_free_llist(&tmp_llist); cur = buf; num = 0; } else cur++; } if (num) { stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, types[i], num, 0, buf); if (stored_rc) { cifs_move_llist(&tmp_llist, &cfile->llist->locks); rc = stored_rc; } else cifs_free_llist(&tmp_llist); } } up_write(&cinode->lock_sem); kfree(buf); return rc; } static int cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, bool wait_flag, bool posix_lck, int lock, int unlock, unsigned int xid) { int rc = 0; __u64 length = 1 + flock->fl_end - flock->fl_start; struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct inode *inode = cfile->dentry->d_inode; if (posix_lck) { int posix_lock_type; rc = cifs_posix_lock_set(file, flock); if (!rc || rc < 0) return rc; if (type & server->vals->shared_lock_type) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; if (unlock == 1) posix_lock_type = CIFS_UNLCK; rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, current->tgid, flock->fl_start, length, NULL, posix_lock_type, wait_flag); goto out; } if (lock) { struct cifsLockInfo *lock; lock = cifs_lock_init(flock->fl_start, length, type); if (!lock) return -ENOMEM; rc = cifs_lock_add_if(cfile, lock, wait_flag); if (rc < 0) { kfree(lock); return rc; } if (!rc) goto out; /* * Windows 7 server can delay breaking lease from read to None * if we set a byte-range lock on a file - break it explicitly * before sending the lock to the server to be sure the next * read won't conflict with non-overlapping locks due to * page reading.
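 * That is what the check below does: with a read-caching but no
 * write-caching oplock, invalidate the mapping and drop the oplock
 * before sending the lock request.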
*/ if (!CIFS_CACHE_WRITE(CIFS_I(inode)) && CIFS_CACHE_READ(CIFS_I(inode))) { cifs_invalidate_mapping(inode); cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n", inode); CIFS_I(inode)->oplock = 0; } rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 1, 0, wait_flag); if (rc) { kfree(lock); return rc; } cifs_lock_add(cfile, lock); } else if (unlock) rc = server->ops->mand_unlock_range(cfile, flock, xid); out: if (flock->fl_flags & FL_POSIX) posix_lock_file_wait(file, flock); return rc; } int cifs_lock(struct file *file, int cmd, struct file_lock *flock) { int rc, xid; int lock = 0, unlock = 0; bool wait_flag = false; bool posix_lck = false; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct cifsInodeInfo *cinode; struct cifsFileInfo *cfile; __u16 netfid; __u32 type; rc = -EACCES; xid = get_xid(); cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n", cmd, flock->fl_flags, flock->fl_type, flock->fl_start, flock->fl_end); cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, tcon->ses->server); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); netfid = cfile->fid.netfid; cinode = CIFS_I(file_inode(file)); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) posix_lck = true; /* * BB add code here to normalize offset and length to account for * negative length which we can not accept over the wire. */ if (IS_GETLK(cmd)) { rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid); free_xid(xid); return rc; } if (!lock && !unlock) { /* * if no lock or unlock then nothing to do since we do not * know what it is */ free_xid(xid); return -EOPNOTSUPP; } rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, xid); free_xid(xid); return rc; } /* * update the file size (if needed) after a write. 
Should be called with * the inode->i_lock held */ void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, unsigned int bytes_written) { loff_t end_of_write = offset + bytes_written; if (end_of_write > cifsi->server_eof) cifsi->server_eof = end_of_write; } static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, size_t write_size, loff_t *offset) { int rc = 0; unsigned int bytes_written = 0; unsigned int total_written; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct TCP_Server_Info *server; unsigned int xid; struct dentry *dentry = open_file->dentry; struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); struct cifs_io_parms io_parms; cifs_sb = CIFS_SB(dentry->d_sb); cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n", write_size, *offset, dentry->d_name.name); tcon = tlink_tcon(open_file->tlink); server = tcon->ses->server; if (!server->ops->sync_write) return -ENOSYS; xid = get_xid(); for (total_written = 0; write_size > total_written; total_written += bytes_written) { rc = -EAGAIN; while (rc == -EAGAIN) { struct kvec iov[2]; unsigned int len; if (open_file->invalidHandle) { /* we could deadlock if we called filemap_fdatawait from here so tell reopen_file not to flush data to server now */ rc = cifs_reopen_file(open_file, false); if (rc != 0) break; } len = min((size_t)cifs_sb->wsize, write_size - total_written); /* iov[0] is reserved for smb header */ iov[1].iov_base = (char *)write_data + total_written; iov[1].iov_len = len; io_parms.pid = pid; io_parms.tcon = tcon; io_parms.offset = *offset; io_parms.length = len; rc = server->ops->sync_write(xid, open_file, &io_parms, &bytes_written, iov, 1); } if (rc || (bytes_written == 0)) { if (total_written) break; else { free_xid(xid); return rc; } } else { spin_lock(&dentry->d_inode->i_lock); cifs_update_eof(cifsi, *offset, bytes_written); spin_unlock(&dentry->d_inode->i_lock); *offset += bytes_written; } } cifs_stats_bytes_written(tcon, total_written); if (total_written > 0) { spin_lock(&dentry->d_inode->i_lock); if (*offset > dentry->d_inode->i_size) i_size_write(dentry->d_inode, *offset); spin_unlock(&dentry->d_inode->i_lock); } mark_inode_dirty_sync(dentry->d_inode); free_xid(xid); return total_written; } struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) { struct cifsFileInfo *open_file = NULL; struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) fsuid_only = false; spin_lock(&cifs_file_list_lock); /* we could simply get the first_list_entry since write-only entries are always at the end of the list but since the first entry might have a close pending, we go through the whole list */ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { if (!open_file->invalidHandle) { /* found a good file */ /* lock it so it will not be closed on us */ cifsFileInfo_get_locked(open_file); spin_unlock(&cifs_file_list_lock); return open_file; } /* else might as well continue, and look for another, or simply have the caller reopen it again rather than trying to fix this handle */ } else /* write only file */ break; /* write only files are last so must be done */ } spin_unlock(&cifs_file_list_lock); return NULL; } struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) { struct 
cifsFileInfo *open_file, *inv_file = NULL; struct cifs_sb_info *cifs_sb; bool any_available = false; int rc; unsigned int refind = 0; /* Having a null inode here (because mapping->host was set to zero by the VFS or MM) should not happen but we had reports of an oops (due to it being zero) during stress test cases so we need to check for it */ if (cifs_inode == NULL) { cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n"); dump_stack(); return NULL; } cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) fsuid_only = false; spin_lock(&cifs_file_list_lock); refind_writable: if (refind > MAX_REOPEN_ATT) { spin_unlock(&cifs_file_list_lock); return NULL; } list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (!any_available && open_file->pid != current->tgid) continue; if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { if (!open_file->invalidHandle) { /* found a good writable file */ cifsFileInfo_get_locked(open_file); spin_unlock(&cifs_file_list_lock); return open_file; } else { if (!inv_file) inv_file = open_file; } } } /* couldn't find a usable FH with same pid, try any available */ if (!any_available) { any_available = true; goto refind_writable; } if (inv_file) { any_available = false; cifsFileInfo_get_locked(inv_file); } spin_unlock(&cifs_file_list_lock); if (inv_file) { rc = cifs_reopen_file(inv_file, false); if (!rc) return inv_file; else { spin_lock(&cifs_file_list_lock); list_move_tail(&inv_file->flist, &cifs_inode->openFileList); spin_unlock(&cifs_file_list_lock); cifsFileInfo_put(inv_file); spin_lock(&cifs_file_list_lock); ++refind; goto refind_writable; } } return NULL; } static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; char *write_data; int rc = -EFAULT; int bytes_written = 0; struct inode *inode; struct cifsFileInfo *open_file; if (!mapping || !mapping->host) return -EFAULT; inode = page->mapping->host; offset += (loff_t)from; write_data = kmap(page); write_data += from; if ((to > PAGE_CACHE_SIZE) || (from > to)) { kunmap(page); return -EIO; } /* racing with truncate? */ if (offset > mapping->host->i_size) { kunmap(page); return 0; /* don't care */ } /* check to make sure that we are not extending the file */ if (mapping->host->i_size - offset < (loff_t)to) to = (unsigned)(mapping->host->i_size - offset); open_file = find_writable_file(CIFS_I(mapping->host), false); if (open_file) { bytes_written = cifs_write(open_file, open_file->pid, write_data, to - from, &offset); cifsFileInfo_put(open_file); /* Does mm or vfs already set times?
*/ inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); if ((bytes_written > 0) && (offset)) rc = 0; else if (bytes_written < 0) rc = bytes_written; } else { cifs_dbg(FYI, "No writeable filehandles for inode\n"); rc = -EIO; } kunmap(page); return rc; } static int cifs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb); bool done = false, scanned = false, range_whole = false; pgoff_t end, index; struct cifs_writedata *wdata; struct TCP_Server_Info *server; struct page *page; int rc = 0; /* * If wsize is smaller than the page cache size, default to writing * one page at a time via cifs_writepage */ if (cifs_sb->wsize < PAGE_CACHE_SIZE) return generic_writepages(mapping, wbc); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = true; scanned = true; } retry: while (!done && index <= end) { unsigned int i, nr_pages, found_pages; pgoff_t next = 0, tofind; struct page **pages; tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; wdata = cifs_writedata_alloc((unsigned int)tofind, cifs_writev_complete); if (!wdata) { rc = -ENOMEM; break; } /* * find_get_pages_tag seems to return a max of 256 on each * iteration, so we must call it several times in order to * fill the array or the wsize is effectively limited to * 256 * PAGE_CACHE_SIZE. */ found_pages = 0; pages = wdata->pages; do { nr_pages = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, tofind, pages); found_pages += nr_pages; tofind -= nr_pages; pages += nr_pages; } while (nr_pages && tofind && index <= end); if (found_pages == 0) { kref_put(&wdata->refcount, cifs_writedata_release); break; } nr_pages = 0; for (i = 0; i < found_pages; i++) { page = wdata->pages[i]; /* * At this point we hold neither mapping->tree_lock nor * lock on the page itself: the page may be truncated or * invalidated (changing page->mapping to NULL), or even * swizzled back from swapper_space to tmpfs file * mapping */ if (nr_pages == 0) lock_page(page); else if (!trylock_page(page)) break; if (unlikely(page->mapping != mapping)) { unlock_page(page); break; } if (!wbc->range_cyclic && page->index > end) { done = true; unlock_page(page); break; } if (next && (page->index != next)) { /* Not next consecutive page */ unlock_page(page); break; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { unlock_page(page); break; } /* * This actually clears the dirty bit in the radix tree. * See cifs_writepage() for more commentary. */ set_page_writeback(page); if (page_offset(page) >= i_size_read(mapping->host)) { done = true; unlock_page(page); end_page_writeback(page); break; } wdata->pages[i] = page; next = page->index + 1; ++nr_pages; } /* reset index to refind any pages skipped */ if (nr_pages == 0) index = wdata->pages[0]->index + 1; /* put any pages we aren't going to use */ for (i = nr_pages; i < found_pages; i++) { page_cache_release(wdata->pages[i]); wdata->pages[i] = NULL; } /* nothing to write? 
*/ if (nr_pages == 0) { kref_put(&wdata->refcount, cifs_writedata_release); continue; } wdata->sync_mode = wbc->sync_mode; wdata->nr_pages = nr_pages; wdata->offset = page_offset(wdata->pages[0]); wdata->pagesz = PAGE_CACHE_SIZE; wdata->tailsz = min(i_size_read(mapping->host) - page_offset(wdata->pages[nr_pages - 1]), (loff_t)PAGE_CACHE_SIZE); wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; do { if (wdata->cfile != NULL) cifsFileInfo_put(wdata->cfile); wdata->cfile = find_writable_file(CIFS_I(mapping->host), false); if (!wdata->cfile) { cifs_dbg(VFS, "No writable handles for inode\n"); rc = -EBADF; break; } wdata->pid = wdata->cfile->pid; server = tlink_tcon(wdata->cfile->tlink)->ses->server; rc = server->ops->async_writev(wdata, cifs_writedata_release); } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); for (i = 0; i < nr_pages; ++i) unlock_page(wdata->pages[i]); /* send failure -- clean up the mess */ if (rc != 0) { for (i = 0; i < nr_pages; ++i) { if (rc == -EAGAIN) redirty_page_for_writepage(wbc, wdata->pages[i]); else SetPageError(wdata->pages[i]); end_page_writeback(wdata->pages[i]); page_cache_release(wdata->pages[i]); } if (rc != -EAGAIN) mapping_set_error(mapping, rc); } kref_put(&wdata->refcount, cifs_writedata_release); wbc->nr_to_write -= nr_pages; if (wbc->nr_to_write <= 0) done = true; index = next; } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = true; index = 0; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; return rc; } static int cifs_writepage_locked(struct page *page, struct writeback_control *wbc) { int rc; unsigned int xid; xid = get_xid(); /* BB add check for wbc flags */ page_cache_get(page); if (!PageUptodate(page)) cifs_dbg(FYI, "ppw - page not up to date\n"); /* * Set the "writeback" flag, and clear "dirty" in the radix tree. * * A writepage() implementation always needs to do either this, * or re-dirty the page with "redirty_page_for_writepage()" in * the case of a failure. * * Just unlocking the page will cause the radix tree tag-bits * to fail to update with the state of the page correctly. 
*/ set_page_writeback(page); retry_write: rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) goto retry_write; else if (rc == -EAGAIN) redirty_page_for_writepage(wbc, page); else if (rc != 0) SetPageError(page); else SetPageUptodate(page); end_page_writeback(page); page_cache_release(page); free_xid(xid); return rc; } static int cifs_writepage(struct page *page, struct writeback_control *wbc) { int rc = cifs_writepage_locked(page, wbc); unlock_page(page); return rc; } static int cifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int rc; struct inode *inode = mapping->host; struct cifsFileInfo *cfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); __u32 pid; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = cfile->pid; else pid = current->tgid; cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n", page, pos, copied); if (PageChecked(page)) { if (copied == len) SetPageUptodate(page); ClearPageChecked(page); } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) SetPageUptodate(page); if (!PageUptodate(page)) { char *page_data; unsigned offset = pos & (PAGE_CACHE_SIZE - 1); unsigned int xid; xid = get_xid(); /* this is probably better than directly calling partialpage_write since in this function the file handle is known which we might as well leverage */ /* BB check if anything else missing out of ppw such as updating last write time */ page_data = kmap(page); rc = cifs_write(cfile, pid, page_data + offset, copied, &pos); /* if (rc < 0) should we set writebehind rc? */ kunmap(page); free_xid(xid); } else { rc = copied; pos += copied; set_page_dirty(page); } if (rc > 0) { spin_lock(&inode->i_lock); if (pos > inode->i_size) i_size_write(inode, pos); spin_unlock(&inode->i_lock); } unlock_page(page); page_cache_release(page); return rc; } int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, int datasync) { unsigned int xid; int rc = 0; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsFileInfo *smbfile = file->private_data; struct inode *inode = file_inode(file); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); rc = filemap_write_and_wait_range(inode->i_mapping, start, end); if (rc) return rc; mutex_lock(&inode->i_mutex); xid = get_xid(); cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n", file->f_path.dentry->d_name.name, datasync); if (!CIFS_CACHE_READ(CIFS_I(inode))) { rc = cifs_invalidate_mapping(inode); if (rc) { cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc); rc = 0; /* don't care about it in fsync */ } } tcon = tlink_tcon(smbfile->tlink); if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { server = tcon->ses->server; if (server->ops->flush) rc = server->ops->flush(xid, tcon, &smbfile->fid); else rc = -ENOSYS; } free_xid(xid); mutex_unlock(&inode->i_mutex); return rc; } int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { unsigned int xid; int rc = 0; struct cifs_tcon *tcon; struct TCP_Server_Info *server; struct cifsFileInfo *smbfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); struct inode *inode = file->f_mapping->host; rc = filemap_write_and_wait_range(inode->i_mapping, start, end); if (rc) return rc; mutex_lock(&inode->i_mutex); xid = get_xid(); cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n", file->f_path.dentry->d_name.name, datasync); tcon = 
tlink_tcon(smbfile->tlink); if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { server = tcon->ses->server; if (server->ops->flush) rc = server->ops->flush(xid, tcon, &smbfile->fid); else rc = -ENOSYS; } free_xid(xid); mutex_unlock(&inode->i_mutex); return rc; } /* * As file closes, flush all cached write data for this inode checking * for write behind errors. */ int cifs_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); int rc = 0; if (file->f_mode & FMODE_WRITE) rc = filemap_write_and_wait(inode->i_mapping); cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc); return rc; } static int cifs_write_allocate_pages(struct page **pages, unsigned long num_pages) { int rc = 0; unsigned long i; for (i = 0; i < num_pages; i++) { pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); if (!pages[i]) { /* * save number of pages we have already allocated and * return with ENOMEM error */ num_pages = i; rc = -ENOMEM; break; } } if (rc) { for (i = 0; i < num_pages; i++) put_page(pages[i]); } return rc; } static inline size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len) { size_t num_pages; size_t clen; clen = min_t(const size_t, len, wsize); num_pages = DIV_ROUND_UP(clen, PAGE_SIZE); if (cur_len) *cur_len = clen; return num_pages; } static void cifs_uncached_writedata_release(struct kref *refcount) { int i; struct cifs_writedata *wdata = container_of(refcount, struct cifs_writedata, refcount); for (i = 0; i < wdata->nr_pages; i++) put_page(wdata->pages[i]); cifs_writedata_release(refcount); } static void cifs_uncached_writev_complete(struct work_struct *work) { struct cifs_writedata *wdata = container_of(work, struct cifs_writedata, work); struct inode *inode = wdata->cfile->dentry->d_inode; struct cifsInodeInfo *cifsi = CIFS_I(inode); spin_lock(&inode->i_lock); cifs_update_eof(cifsi, wdata->offset, wdata->bytes); if (cifsi->server_eof > inode->i_size) i_size_write(inode, cifsi->server_eof); spin_unlock(&inode->i_lock); complete(&wdata->done); kref_put(&wdata->refcount, cifs_uncached_writedata_release); } /* attempt to send write to server, retry on any -EAGAIN errors */ static int cifs_uncached_retry_writev(struct cifs_writedata *wdata) { int rc; struct TCP_Server_Info *server; server = tlink_tcon(wdata->cfile->tlink)->ses->server; do { if (wdata->cfile->invalidHandle) { rc = cifs_reopen_file(wdata->cfile, false); if (rc != 0) continue; } rc = server->ops->async_writev(wdata, cifs_uncached_writedata_release); } while (rc == -EAGAIN); return rc; } static ssize_t cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { unsigned long nr_pages, i; size_t copied, len, cur_len; ssize_t total_written = 0; loff_t offset; struct iov_iter it; struct cifsFileInfo *open_file; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb; struct cifs_writedata *wdata, *tmp; struct list_head wdata_list; int rc; pid_t pid; len = iov_length(iov, nr_segs); if (!len) return 0; rc = generic_write_checks(file, poffset, &len, 0); if (rc) return rc; INIT_LIST_HEAD(&wdata_list); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); if (!tcon->ses->server->ops->async_writev) return -ENOSYS; offset = *poffset; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; iov_iter_init(&it, iov, nr_segs, len, 0); do { size_t save_len; nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len); wdata = cifs_writedata_alloc(nr_pages, 
cifs_uncached_writev_complete); if (!wdata) { rc = -ENOMEM; break; } rc = cifs_write_allocate_pages(wdata->pages, nr_pages); if (rc) { kfree(wdata); break; } save_len = cur_len; for (i = 0; i < nr_pages; i++) { size_t bytes = min_t(const size_t, cur_len, PAGE_SIZE); copied = iov_iter_copy_from_user(wdata->pages[i], &it, 0, bytes); cur_len -= copied; iov_iter_advance(&it, copied); /* a short copy means we hit an unmapped area in the user buffer; stop copying here -- the next pass through the outer loop will see a zero-length copy and bail out */ if (copied < bytes) break; } cur_len = save_len - cur_len; /* if nothing was copied, the iovec address was most likely bogus; free what we allocated and fail with -EFAULT instead of sending uninitialized page contents to the server */ if (!cur_len) { for (i = 0; i < nr_pages; i++) put_page(wdata->pages[i]); kfree(wdata); rc = -EFAULT; break; } /* i + 1 is the number of pages actually filled above; release any pages beyond that and shrink nr_pages to match */ for ( ; nr_pages > i + 1; nr_pages--) put_page(wdata->pages[nr_pages - 1]); wdata->sync_mode = WB_SYNC_ALL; wdata->nr_pages = nr_pages; wdata->offset = (__u64)offset; wdata->cfile = cifsFileInfo_get(open_file); wdata->pid = pid; wdata->bytes = cur_len; wdata->pagesz = PAGE_SIZE; wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); rc = cifs_uncached_retry_writev(wdata); if (rc) { kref_put(&wdata->refcount, cifs_uncached_writedata_release); break; } list_add_tail(&wdata->list, &wdata_list); offset += cur_len; len -= cur_len; } while (len > 0); /* * If at least one write was successfully sent, then discard any rc * value from the later writes. If the other write succeeds, then * we'll end up returning whatever was written. If it fails, then * we'll get a new rc value from that. */ if (!list_empty(&wdata_list)) rc = 0; /* * Wait for and collect replies for any successful sends in order of * increasing offset. Once an error is hit or we get a fatal signal * while waiting, then return without waiting for any more replies. */ restart_loop: list_for_each_entry_safe(wdata, tmp, &wdata_list, list) { if (!rc) { /* FIXME: freezable too? */ rc = wait_for_completion_killable(&wdata->done); if (rc) rc = -EINTR; else if (wdata->result) rc = wdata->result; else total_written += wdata->bytes; /* resend call if it's a retryable error */ if (rc == -EAGAIN) { rc = cifs_uncached_retry_writev(wdata); goto restart_loop; } } list_del_init(&wdata->list); kref_put(&wdata->refcount, cifs_uncached_writedata_release); } if (total_written > 0) *poffset += total_written; cifs_stats_bytes_written(tcon, total_written); return total_written ? total_written : (ssize_t)rc; } ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { ssize_t written; struct inode *inode; inode = file_inode(iocb->ki_filp); /* * BB - optimize the way when signing is disabled. We can drop this * extra memory-to-memory copying and use iovec buffers for constructing * write request. */ written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos); if (written > 0) { CIFS_I(inode)->invalid_mapping = true; iocb->ki_pos = pos; } return written; } static ssize_t cifs_writev(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct inode *inode = file->f_mapping->host; struct cifsInodeInfo *cinode = CIFS_I(inode); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; ssize_t rc = -EACCES; BUG_ON(iocb->ki_pos != pos); /* * We need to hold the sem to be sure nobody modifies lock list * with a brlock that prevents writing.
*/ down_read(&cinode->lock_sem); if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), server->vals->exclusive_lock_type, NULL, CIFS_WRITE_OP)) { mutex_lock(&inode->i_mutex); rc = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); } if (rc > 0) { ssize_t err; err = generic_write_sync(file, pos, rc); if (err < 0 && rc > 0) rc = err; } up_read(&cinode->lock_sem); return rc; } ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = file_inode(iocb->ki_filp); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsFileInfo *cfile = (struct cifsFileInfo *) iocb->ki_filp->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); ssize_t written; if (CIFS_CACHE_WRITE(cinode)) { if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) return generic_file_aio_write(iocb, iov, nr_segs, pos); return cifs_writev(iocb, iov, nr_segs, pos); } /* * For non-oplocked files in strict cache mode we need to write the data * to the server exactly from pos to pos+len-1 rather than flush all * affected pages, because flushing may cause an error with mandatory locks * on these pages but not on the region from pos to pos+len-1. */ written = cifs_user_writev(iocb, iov, nr_segs, pos); if (written > 0 && CIFS_CACHE_READ(cinode)) { /* * A Windows 7 server can delay breaking a level2 oplock when a write * request comes in - break it on the client to prevent reading * stale data. */ cifs_invalidate_mapping(inode); cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n", inode); cinode->oplock = 0; } return written; } static struct cifs_readdata * cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete) { struct cifs_readdata *rdata; rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages), GFP_KERNEL); if (rdata != NULL) { kref_init(&rdata->refcount); INIT_LIST_HEAD(&rdata->list); init_completion(&rdata->done); INIT_WORK(&rdata->work, complete); } return rdata; } void cifs_readdata_release(struct kref *refcount) { struct cifs_readdata *rdata = container_of(refcount, struct cifs_readdata, refcount); if (rdata->cfile) cifsFileInfo_put(rdata->cfile); kfree(rdata); } static int cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages) { int rc = 0; struct page *page; unsigned int i; for (i = 0; i < nr_pages; i++) { page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); if (!page) { rc = -ENOMEM; break; } rdata->pages[i] = page; } if (rc) { unsigned int nr_allocated = i; /* only pages [0, i) were allocated; calling put_page() on the trailing NULL entries would oops */ for (i = 0; i < nr_allocated; i++) { put_page(rdata->pages[i]); rdata->pages[i] = NULL; } } return rc; } static void cifs_uncached_readdata_release(struct kref *refcount) { struct cifs_readdata *rdata = container_of(refcount, struct cifs_readdata, refcount); unsigned int i; for (i = 0; i < rdata->nr_pages; i++) { put_page(rdata->pages[i]); rdata->pages[i] = NULL; } cifs_readdata_release(refcount); } static int cifs_retry_async_readv(struct cifs_readdata *rdata) { int rc; struct TCP_Server_Info *server; server = tlink_tcon(rdata->cfile->tlink)->ses->server; do { if (rdata->cfile->invalidHandle) { rc = cifs_reopen_file(rdata->cfile, true); if (rc != 0) continue; } rc = server->ops->async_readv(rdata); } while (rc == -EAGAIN); return rc; } /** * cifs_readdata_to_iov - copy data from pages in response to an iovec * @rdata: the readdata response with list of pages holding data * @iov:
vector in which we should copy the data * @nr_segs: number of segments in vector * @offset: offset into file of the first iovec * @copied: used to return the amount of data copied to the iov * * This function copies data from a list of pages in a readdata response into * an array of iovecs. It will first calculate where the data should go * based on the info in the readdata and then copy the data into that spot. */ static ssize_t cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov, unsigned long nr_segs, loff_t offset, ssize_t *copied) { int rc = 0; struct iov_iter ii; size_t pos = rdata->offset - offset; ssize_t remaining = rdata->bytes; unsigned char *pdata; unsigned int i; /* set up iov_iter and advance to the correct offset */ iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0); iov_iter_advance(&ii, pos); *copied = 0; for (i = 0; i < rdata->nr_pages; i++) { ssize_t copy; struct page *page = rdata->pages[i]; /* copy a whole page or whatever's left */ copy = min_t(ssize_t, remaining, PAGE_SIZE); /* ...but limit it to whatever space is left in the iov */ copy = min_t(ssize_t, copy, iov_iter_count(&ii)); /* go while there's data to be copied and no errors */ if (copy && !rc) { pdata = kmap(page); rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset, (int)copy); kunmap(page); if (!rc) { *copied += copy; remaining -= copy; iov_iter_advance(&ii, copy); } } } return rc; } static void cifs_uncached_readv_complete(struct work_struct *work) { struct cifs_readdata *rdata = container_of(work, struct cifs_readdata, work); complete(&rdata->done); kref_put(&rdata->refcount, cifs_uncached_readdata_release); } static int cifs_uncached_read_into_pages(struct TCP_Server_Info *server, struct cifs_readdata *rdata, unsigned int len) { int total_read = 0, result = 0; unsigned int i; unsigned int nr_pages = rdata->nr_pages; struct kvec iov; rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; if (len >= PAGE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); iov.iov_len = PAGE_SIZE; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); iov.iov_len = len; cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", i, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, '\0', PAGE_SIZE - len); rdata->tailsz = len; len = 0; } else { /* no need to hold page hostage */ rdata->pages[i] = NULL; rdata->nr_pages--; put_page(page); continue; } result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); kunmap(page); if (result < 0) break; total_read += result; } return total_read > 0 ? 
total_read : result; } static ssize_t cifs_iovec_read(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { ssize_t rc; size_t len, cur_len; ssize_t total_read = 0; loff_t offset = *poffset; unsigned int npages; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct cifsFileInfo *open_file; struct cifs_readdata *rdata, *tmp; struct list_head rdata_list; pid_t pid; if (!nr_segs) return 0; len = iov_length(iov, nr_segs); if (!len) return 0; INIT_LIST_HEAD(&rdata_list); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); if (!tcon->ses->server->ops->async_readv) return -ENOSYS; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cifs_dbg(FYI, "attempting read on write only file instance\n"); do { cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); /* allocate a readdata struct */ rdata = cifs_readdata_alloc(npages, cifs_uncached_readv_complete); if (!rdata) { rc = -ENOMEM; goto error; } rc = cifs_read_allocate_pages(rdata, npages); if (rc) goto error; rdata->cfile = cifsFileInfo_get(open_file); rdata->nr_pages = npages; rdata->offset = offset; rdata->bytes = cur_len; rdata->pid = pid; rdata->pagesz = PAGE_SIZE; rdata->read_into_pages = cifs_uncached_read_into_pages; rc = cifs_retry_async_readv(rdata); error: if (rc) { kref_put(&rdata->refcount, cifs_uncached_readdata_release); break; } list_add_tail(&rdata->list, &rdata_list); offset += cur_len; len -= cur_len; } while (len > 0); /* if at least one read request send succeeded, then reset rc */ if (!list_empty(&rdata_list)) rc = 0; /* the loop below should proceed in the order of increasing offsets */ restart_loop: list_for_each_entry_safe(rdata, tmp, &rdata_list, list) { if (!rc) { ssize_t copied; /* FIXME: freezable sleep too? */ rc = wait_for_completion_killable(&rdata->done); if (rc) rc = -EINTR; else if (rdata->result) rc = rdata->result; else { rc = cifs_readdata_to_iov(rdata, iov, nr_segs, *poffset, &copied); total_read += copied; } /* resend call if it's a retryable error */ if (rc == -EAGAIN) { rc = cifs_retry_async_readv(rdata); goto restart_loop; } } list_del_init(&rdata->list); kref_put(&rdata->refcount, cifs_uncached_readdata_release); } cifs_stats_bytes_read(tcon, total_read); *poffset += total_read; /* mask nodata case */ if (rc == -ENODATA) rc = 0; return total_read ? total_read : rc; } ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { ssize_t read; read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos); if (read > 0) iocb->ki_pos = pos; return read; } ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = file_inode(iocb->ki_filp); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsFileInfo *cfile = (struct cifsFileInfo *) iocb->ki_filp->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = -EACCES; /* * In strict cache mode we need to read from the server all the time * if we don't have level II oplock because the server can delay mtime * change - so we can't make a decision about inode invalidating. * And we can also fail with pagereading if there are mandatory locks * on pages affected by this read but not on the region from pos to * pos+len-1. 
*/ if (!CIFS_CACHE_READ(cinode)) return cifs_user_readv(iocb, iov, nr_segs, pos); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) return generic_file_aio_read(iocb, iov, nr_segs, pos); /* * We need to hold the sem to be sure nobody modifies lock list * with a brlock that prevents reading. */ down_read(&cinode->lock_sem); if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), tcon->ses->server->vals->shared_lock_type, NULL, CIFS_READ_OP)) rc = generic_file_aio_read(iocb, iov, nr_segs, pos); up_read(&cinode->lock_sem); return rc; } static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) { int rc = -EACCES; unsigned int bytes_read = 0; unsigned int total_read; unsigned int current_read_size; unsigned int rsize; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct TCP_Server_Info *server; unsigned int xid; char *cur_offset; struct cifsFileInfo *open_file; struct cifs_io_parms io_parms; int buf_type = CIFS_NO_BUFFER; __u32 pid; xid = get_xid(); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); /* FIXME: set up handlers for larger reads and/or convert to async */ rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize); if (file->private_data == NULL) { rc = -EBADF; free_xid(xid); return rc; } open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); server = tcon->ses->server; if (!server->ops->sync_read) { free_xid(xid); return -ENOSYS; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cifs_dbg(FYI, "attempting read on write only file instance\n"); for (total_read = 0, cur_offset = read_data; read_size > total_read; total_read += bytes_read, cur_offset += bytes_read) { current_read_size = min_t(uint, read_size - total_read, rsize); /* * For windows me and 9x we do not want to request more than it * negotiated since it will refuse the read then. */ if ((tcon->ses) && !(tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)) { current_read_size = min_t(uint, current_read_size, CIFSMaxBufSize); } rc = -EAGAIN; while (rc == -EAGAIN) { if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); if (rc != 0) break; } io_parms.pid = pid; io_parms.tcon = tcon; io_parms.offset = *offset; io_parms.length = current_read_size; rc = server->ops->sync_read(xid, open_file, &io_parms, &bytes_read, &cur_offset, &buf_type); } if (rc || (bytes_read == 0)) { if (total_read) { break; } else { free_xid(xid); return rc; } } else { cifs_stats_bytes_read(tcon, total_read); *offset += bytes_read; } } free_xid(xid); return total_read; } /* * If the page is mmap'ed into a process' page tables, then we need to make * sure that it doesn't change while being written back. 
*/ static int cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; lock_page(page); return VM_FAULT_LOCKED; } static struct vm_operations_struct cifs_file_vm_ops = { .fault = filemap_fault, .page_mkwrite = cifs_page_mkwrite, .remap_pages = generic_file_remap_pages, }; int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) { int rc, xid; struct inode *inode = file_inode(file); xid = get_xid(); if (!CIFS_CACHE_READ(CIFS_I(inode))) { rc = cifs_invalidate_mapping(inode); if (rc) return rc; } rc = generic_file_mmap(file, vma); if (rc == 0) vma->vm_ops = &cifs_file_vm_ops; free_xid(xid); return rc; } int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) { int rc, xid; xid = get_xid(); rc = cifs_revalidate_file(file); if (rc) { cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", rc); free_xid(xid); return rc; } rc = generic_file_mmap(file, vma); if (rc == 0) vma->vm_ops = &cifs_file_vm_ops; free_xid(xid); return rc; } static void cifs_readv_complete(struct work_struct *work) { unsigned int i; struct cifs_readdata *rdata = container_of(work, struct cifs_readdata, work); for (i = 0; i < rdata->nr_pages; i++) { struct page *page = rdata->pages[i]; lru_cache_add_file(page); if (rdata->result == 0) { flush_dcache_page(page); SetPageUptodate(page); } unlock_page(page); if (rdata->result == 0) cifs_readpage_to_fscache(rdata->mapping->host, page); page_cache_release(page); rdata->pages[i] = NULL; } kref_put(&rdata->refcount, cifs_readdata_release); } static int cifs_readpages_read_into_pages(struct TCP_Server_Info *server, struct cifs_readdata *rdata, unsigned int len) { int total_read = 0, result = 0; unsigned int i; u64 eof; pgoff_t eof_index; unsigned int nr_pages = rdata->nr_pages; struct kvec iov; /* determine the eof that the server (probably) has */ eof = CIFS_I(rdata->mapping->host)->server_eof; eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); rdata->tailsz = PAGE_CACHE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; if (len >= PAGE_CACHE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); iov.iov_len = PAGE_CACHE_SIZE; cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); len -= PAGE_CACHE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); iov.iov_len = len; cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, '\0', PAGE_CACHE_SIZE - len); rdata->tailsz = len; len = 0; } else if (page->index > eof_index) { /* * The VFS will not try to do readahead past the * i_size, but it's possible that we have outstanding * writes with gaps in the middle and the i_size hasn't * caught up yet. Populate those with zeroed out pages * to prevent the VFS from repeatedly attempting to * fill them until the writes are flushed. */ zero_user(page, 0, PAGE_CACHE_SIZE); lru_cache_add_file(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); page_cache_release(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; } else { /* no need to hold page hostage */ lru_cache_add_file(page); unlock_page(page); page_cache_release(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; } result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); kunmap(page); if (result < 0) break; total_read += result; } return total_read > 0 ? 
total_read : result; } static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); unsigned int rsize = cifs_sb->rsize; pid_t pid; /* * Give up immediately if rsize is too small to read an entire page. * The VFS will fall back to readpage. We should never reach this * point however since we set ra_pages to 0 when the rsize is smaller * than a cache page. */ if (unlikely(rsize < PAGE_CACHE_SIZE)) return 0; /* * Reads as many pages as possible from fscache. Returns -ENOBUFS * immediately if the cookie is negative * * After this point, every page in the list might have PG_fscache set, * so we will need to clean that up off of every page we don't use. */ rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, &num_pages); if (rc == 0) return rc; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; rc = 0; INIT_LIST_HEAD(&tmplist); cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n", __func__, file, mapping, num_pages); /* * Start with the page at end of list and move it to private * list. Do the same with any following pages until we hit * the rsize limit, hit an index discontinuity, or run out of * pages. Issue the async read and then start the loop again * until the list is empty. * * Note that list order is important. The page_list is in * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. */ while (!list_empty(page_list)) { unsigned int i; unsigned int bytes = PAGE_CACHE_SIZE; unsigned int expected_index; unsigned int nr_pages = 1; loff_t offset; struct page *page, *tpage; struct cifs_readdata *rdata; page = list_entry(page_list->prev, struct page, lru); /* * Lock the page and put it in the cache. Since no one else * should have access to this page, we're safe to simply set * PG_locked without checking it first. */ __set_page_locked(page); rc = add_to_page_cache_locked(page, mapping, page->index, GFP_KERNEL); /* give up if we can't stick it in the cache */ if (rc) { __clear_page_locked(page); break; } /* move first page to the tmplist */ offset = (loff_t)page->index << PAGE_CACHE_SHIFT; list_move_tail(&page->lru, &tmplist); /* now try and add more pages onto the request */ expected_index = page->index + 1; list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { /* discontinuity ? */ if (page->index != expected_index) break; /* would this page push the read over the rsize? 
*/ if (bytes + PAGE_CACHE_SIZE > rsize) break; __set_page_locked(page); if (add_to_page_cache_locked(page, mapping, page->index, GFP_KERNEL)) { __clear_page_locked(page); break; } list_move_tail(&page->lru, &tmplist); bytes += PAGE_CACHE_SIZE; expected_index++; nr_pages++; } rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete); if (!rdata) { /* best to give up if we're out of mem */ list_for_each_entry_safe(page, tpage, &tmplist, lru) { list_del(&page->lru); lru_cache_add_file(page); unlock_page(page); page_cache_release(page); } rc = -ENOMEM; break; } rdata->cfile = cifsFileInfo_get(open_file); rdata->mapping = mapping; rdata->offset = offset; rdata->bytes = bytes; rdata->pid = pid; rdata->pagesz = PAGE_CACHE_SIZE; rdata->read_into_pages = cifs_readpages_read_into_pages; list_for_each_entry_safe(page, tpage, &tmplist, lru) { list_del(&page->lru); rdata->pages[rdata->nr_pages++] = page; } rc = cifs_retry_async_readv(rdata); if (rc != 0) { for (i = 0; i < rdata->nr_pages; i++) { page = rdata->pages[i]; lru_cache_add_file(page); unlock_page(page); page_cache_release(page); } kref_put(&rdata->refcount, cifs_readdata_release); break; } kref_put(&rdata->refcount, cifs_readdata_release); } /* Any pages that have been shown to fscache but didn't get added to * the pagecache must be uncached before they get returned to the * allocator. */ cifs_fscache_readpages_cancel(mapping->host, page_list); return rc; } /* * cifs_readpage_worker must be called with the page pinned */ static int cifs_readpage_worker(struct file *file, struct page *page, loff_t *poffset) { char *read_data; int rc; /* Is the page cached? */ rc = cifs_readpage_from_fscache(file_inode(file), page); if (rc == 0) goto read_complete; read_data = kmap(page); /* for reads over a certain size could initiate async read ahead */ rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); if (rc < 0) goto io_error; else cifs_dbg(FYI, "Bytes read %d\n", rc); file_inode(file)->i_atime = current_fs_time(file_inode(file)->i_sb); if (PAGE_CACHE_SIZE > rc) memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); flush_dcache_page(page); SetPageUptodate(page); /* send this page to the cache */ cifs_readpage_to_fscache(file_inode(file), page); rc = 0; io_error: kunmap(page); unlock_page(page); read_complete: return rc; } static int cifs_readpage(struct file *file, struct page *page) { loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; int rc = -EACCES; unsigned int xid; xid = get_xid(); if (file->private_data == NULL) { rc = -EBADF; free_xid(xid); return rc; } cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n", page, (int)offset, (int)offset); rc = cifs_readpage_worker(file, page, &offset); free_xid(xid); return rc; } static int is_inode_writable(struct cifsInodeInfo *cifs_inode) { struct cifsFileInfo *open_file; spin_lock(&cifs_file_list_lock); list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { spin_unlock(&cifs_file_list_lock); return 1; } } spin_unlock(&cifs_file_list_lock); return 0; } /* We do not want to update the file size from server for inodes open for write - to avoid races with writepage extending the file - in the future we could consider allowing refreshing the inode only on increases in the file size but this is tricky to do without racing with writebehind page caching in the current Linux kernel design */ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) { if (!cifsInode) return true; if (is_inode_writable(cifsInode)) { /* This inode is 
open for write at least once */ struct cifs_sb_info *cifs_sb; cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on directio we can change size safely */ return true; } if (i_size_read(&cifsInode->vfs_inode) < end_of_file) return true; return false; } else return true; } static int cifs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int oncethru = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; loff_t offset = pos & (PAGE_CACHE_SIZE - 1); loff_t page_start = pos & PAGE_MASK; loff_t i_size; struct page *page; int rc = 0; cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); start: page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { rc = -ENOMEM; goto out; } if (PageUptodate(page)) goto out; /* * If we write a full page it will be up to date, no need to read from * the server. If the write is short, we'll end up doing a sync write * instead. */ if (len == PAGE_CACHE_SIZE) goto out; /* * optimize away the read when we have an oplock, and we're not * expecting to use any of the data we'd be reading in. That * is, when the page lies beyond the EOF, or straddles the EOF * and the write will cover all of the existing data. */ if (CIFS_CACHE_READ(CIFS_I(mapping->host))) { i_size = i_size_read(mapping->host); if (page_start >= i_size || (offset == 0 && (pos + len) >= i_size)) { zero_user_segments(page, 0, offset, offset + len, PAGE_CACHE_SIZE); /* * PageChecked means that the parts of the page * to which we're not writing are considered up * to date. Once the data is copied to the * page, it can be set uptodate. */ SetPageChecked(page); goto out; } } if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) { /* * might as well read a page, it is fast enough. If we get * an error, we don't need to return it. cifs_write_end will * do a sync write instead since PG_uptodate isn't set. */ cifs_readpage_worker(file, page, &page_start); page_cache_release(page); oncethru = 1; goto start; } else { /* we could try using another file handle if there is one - but how would we lock it to prevent close of that handle racing with this read? 
In any case this will be written out by write_end so is fine */ } out: *pagep = page; return rc; } static int cifs_release_page(struct page *page, gfp_t gfp) { if (PagePrivate(page)) return 0; return cifs_fscache_release_page(page, gfp); } static void cifs_invalidate_page(struct page *page, unsigned int offset, unsigned int length) { struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); if (offset == 0 && length == PAGE_CACHE_SIZE) cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); } static int cifs_launder_page(struct page *page) { int rc = 0; loff_t range_start = page_offset(page); loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, .range_start = range_start, .range_end = range_end, }; cifs_dbg(FYI, "Launder page: %p\n", page); if (clear_page_dirty_for_io(page)) rc = cifs_writepage_locked(page, &wbc); cifs_fscache_invalidate_page(page, page->mapping->host); return rc; } void cifs_oplock_break(struct work_struct *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = cfile->dentry->d_inode; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n", inode); cinode->oplock = 0; } if (inode && S_ISREG(inode->i_mode)) { if (CIFS_CACHE_READ(cinode)) break_lease(inode, O_RDONLY); else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); if (!CIFS_CACHE_READ(cinode)) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_invalidate_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); } rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc); /* * Releasing a stale oplock after a recent reconnect of the smb session, * using a now-incorrect file handle, is not a data integrity issue, but * do not bother sending an oplock release if the session to the server * is still disconnected, since the oplock has already been released by * the server. */ if (!cfile->oplock_break_cancelled) { rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } } /* * The presence of cifs_direct_io() in the address space ops vector * allows open() O_DIRECT flags which would have failed otherwise. * * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests * so this method should never be called. * * Direct IO is not yet supported in the cached mode. */ static ssize_t cifs_direct_io(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs) { /* * FIXME * Eventually need to support direct IO for non forcedirectio mounts */ return -EINVAL; } const struct address_space_operations cifs_addr_ops = { .readpage = cifs_readpage, .readpages = cifs_readpages, .writepage = cifs_writepage, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .set_page_dirty = __set_page_dirty_nobuffers, .releasepage = cifs_release_page, .direct_IO = cifs_direct_io, .invalidatepage = cifs_invalidate_page, .launder_page = cifs_launder_page, }; /* * cifs_readpages requires the server to support a buffer large enough to * contain the header plus one complete page of data. Otherwise, we need * to leave cifs_readpages out of the address space operations.
*/ const struct address_space_operations cifs_addr_ops_smallbuf = { .readpage = cifs_readpage, .writepage = cifs_writepage, .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, .set_page_dirty = __set_page_dirty_nobuffers, .releasepage = cifs_release_page, .invalidatepage = cifs_invalidate_page, .launder_page = cifs_launder_page, };
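/*
 * Illustrative sketch (not part of the original file, kept compiled out):
 * how a caller chooses between the two address_space_operations tables
 * above. This paraphrases the logic of cifs_set_ops() in fs/cifs/inode.c;
 * treat the exact threshold and field names as assumptions rather than a
 * verbatim quote of that file.
 */
#if 0
static void example_pick_aops(struct inode *inode, struct cifs_sb_info *cifs_sb)
{
	/* cifs_readpages needs the negotiated buffer to hold one full page
	   plus the CIFS header; otherwise fall back to the smallbuf table,
	   which omits .readpages */
	if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
	    PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
	else
		inode->i_data.a_ops = &cifs_addr_ops;
}
#endif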
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % JJJJJ PPPP EEEEE GGGG % % J P P E G % % J PPPP EEE G GG % % J J P E G G % % JJJ P EEEEE GGG % % % % % % Read/Write JPEG Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This software is based in part on the work of the Independent JPEG Group. % See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and % licensing restrictions. Blob support contributed by Glenn Randers-Pehrson. % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap-private.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/xml-tree.h" #include <setjmp.h> #if defined(MAGICKCORE_JPEG_DELEGATE) #define JPEG_INTERNAL_OPTIONS #if defined(__MINGW32__) || defined(__MINGW64__) # define XMD_H 1 /* Avoid conflicting typedef for INT32 */ #endif #undef HAVE_STDLIB_H #include "jpeglib.h" #include "jerror.h" #endif /* Define declarations. */ #define ICC_MARKER (JPEG_APP0+2) #define ICC_PROFILE "ICC_PROFILE" #define IPTC_MARKER (JPEG_APP0+13) #define XML_MARKER (JPEG_APP0+1) #define MaxBufferExtent 16384 /* Typedef declarations. 
*/ #if defined(MAGICKCORE_JPEG_DELEGATE) typedef struct _DestinationManager { struct jpeg_destination_mgr manager; Image *image; JOCTET *buffer; } DestinationManager; typedef struct _ErrorManager { Image *image; MagickBooleanType finished; StringInfo *profile; jmp_buf error_recovery; } ErrorManager; typedef struct _SourceManager { struct jpeg_source_mgr manager; Image *image; JOCTET *buffer; boolean start_of_blob; } SourceManager; #endif typedef struct _QuantizationTable { char *slot, *description; size_t width, height; double divisor; unsigned int *levels; } QuantizationTable; /* Forward declarations. */ #if defined(MAGICKCORE_JPEG_DELEGATE) static MagickBooleanType WriteJPEGImage(const ImageInfo *,Image *); #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J P E G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJPEG() returns MagickTrue if the image format type, identified by the % magick string, is JPEG. % % The format of the IsJPEG method is: % % MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) { if (length < 3) return(MagickFalse); if (memcmp(magick,"\377\330\377",3) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJPEGImage() reads a JPEG image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer to % the new image. % % The format of the ReadJPEGImage method is: % % Image *ReadJPEGImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static boolean FillInputBuffer(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image, MaxBufferExtent,source->buffer); if (source->manager.bytes_in_buffer == 0) { if (source->start_of_blob != FALSE) ERREXIT(cinfo,JERR_INPUT_EMPTY); WARNMS(cinfo,JWRN_JPEG_EOF); source->buffer[0]=(JOCTET) 0xff; source->buffer[1]=(JOCTET) JPEG_EOI; source->manager.bytes_in_buffer=2; } source->manager.next_input_byte=source->buffer; source->start_of_blob=FALSE; return(TRUE); } static int GetCharacter(j_decompress_ptr jpeg_info) { if (jpeg_info->src->bytes_in_buffer == 0) (void) (*jpeg_info->src->fill_input_buffer)(jpeg_info); jpeg_info->src->bytes_in_buffer--; return((int) GETJOCTET(*jpeg_info->src->next_input_byte++)); } static void InitializeSource(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->start_of_blob=TRUE; } static MagickBooleanType IsITUFaxImage(const Image *image) { const StringInfo *profile; const unsigned char *datum; profile=GetImageProfile(image,"8bim"); if (profile == (const StringInfo *) NULL) return(MagickFalse); if (GetStringInfoLength(profile) < 5) return(MagickFalse); datum=GetStringInfoDatum(profile); if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) && (datum[3] == 0x41) && (datum[4] == 0x58)) return(MagickTrue); return(MagickFalse); } static void JPEGErrorHandler(j_common_ptr jpeg_info) { char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; (jpeg_info->err->format_message)(jpeg_info,message); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); if (error_manager->finished != MagickFalse) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageWarning,(char *) message,"`%s'",image->filename); else (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,(char *) message,"`%s'",image->filename); longjmp(error_manager->error_recovery,1); } static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level) { #define JPEGExcessiveWarnings 1000 char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; if (level < 0) { /* Process warning message. */ (jpeg_info->err->format_message)(jpeg_info,message); if (jpeg_info->err->num_warnings++ > JPEGExcessiveWarnings) JPEGErrorHandler(jpeg_info); ThrowBinaryException(CorruptImageWarning,(char *) message, image->filename); } else if ((image->debug != MagickFalse) && (level >= jpeg_info->err->trace_level)) { /* Process trace message. */ (jpeg_info->err->format_message)(jpeg_info,message); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); } return(MagickTrue); } static boolean ReadComment(j_decompress_ptr jpeg_info) { ErrorManager *error_manager; Image *image; register unsigned char *p; register ssize_t i; size_t length; StringInfo *comment; /* Determine length of comment. 
*/ error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=GetCharacter(jpeg_info); if (length <= 2) return(TRUE); length-=2; comment=BlobToStringInfo((const void *) NULL,length); if (comment == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } /* Read comment. */ error_manager->profile=comment; p=GetStringInfoDatum(comment); for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++) *p++=(unsigned char) GetCharacter(jpeg_info); *p='\0'; error_manager->profile=NULL; p=GetStringInfoDatum(comment); (void) SetImageProperty(image,"comment",(const char *) p); comment=DestroyStringInfo(comment); return(TRUE); } static boolean ReadICCProfile(j_decompress_ptr jpeg_info) { char magick[12]; ErrorManager *error_manager; Image *image; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *icc_profile, *profile; /* Read color profile. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); length-=2; if (length <= 14) { while (length-- > 0) (void) GetCharacter(jpeg_info); return(TRUE); } for (i=0; i < 12; i++) magick[i]=(char) GetCharacter(jpeg_info); if (LocaleCompare(magick,ICC_PROFILE) != 0) { /* Not a ICC profile, return. */ for (i=0; i < (ssize_t) (length-12); i++) (void) GetCharacter(jpeg_info); return(TRUE); } (void) GetCharacter(jpeg_info); /* id */ (void) GetCharacter(jpeg_info); /* markers */ length-=14; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; icc_profile=(StringInfo *) GetImageProfile(image,"icc"); if (icc_profile != (StringInfo *) NULL) { ConcatenateStringInfo(icc_profile,profile); profile=DestroyStringInfo(profile); } else { status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: ICC, %.20g bytes",(double) length); return(TRUE); } static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info) { char magick[MaxTextExtent]; ErrorManager *error_manager; Image *image; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *iptc_profile, *profile; /* Determine length of binary data stored here. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); length-=2; if (length <= 14) { while (length-- > 0) (void) GetCharacter(jpeg_info); return(TRUE); } /* Validate that this was written as a Photoshop resource format slug. */ for (i=0; i < 10; i++) magick[i]=(char) GetCharacter(jpeg_info); magick[10]='\0'; length-=10; if (length <= 10) return(TRUE); if (LocaleCompare(magick,"Photoshop ") != 0) { /* Not a IPTC profile, return. 
*/ for (i=0; i < (ssize_t) length; i++) (void) GetCharacter(jpeg_info); return(TRUE); } /* Remove the version number. */ for (i=0; i < 4; i++) (void) GetCharacter(jpeg_info); if (length <= 11) return(TRUE); length-=4; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; iptc_profile=(StringInfo *) GetImageProfile(image,"8bim"); if (iptc_profile != (StringInfo *) NULL) { ConcatenateStringInfo(iptc_profile,profile); profile=DestroyStringInfo(profile); } else { status=SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: iptc, %.20g bytes",(double) length); return(TRUE); } static boolean ReadProfile(j_decompress_ptr jpeg_info) { char name[MaxTextExtent]; const StringInfo *previous_profile; ErrorManager *error_manager; Image *image; int marker; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *profile; /* Read generic profile. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); if (length <= 2) return(TRUE); length-=2; marker=jpeg_info->unread_marker-JPEG_APP0; (void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker); error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; if (marker == 1) { p=GetStringInfoDatum(profile); if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0)) (void) CopyMagickString(name,"exif",MaxTextExtent); if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0)) { ssize_t j; /* Extract namespace from XMP profile. 
*/ p=GetStringInfoDatum(profile); for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++) { if (*p == '\0') break; p++; } if (j < (ssize_t) GetStringInfoLength(profile)) (void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1))); (void) CopyMagickString(name,"xmp",MaxTextExtent); } } previous_profile=GetImageProfile(image,name); if (previous_profile != (const StringInfo *) NULL) { size_t length; length=GetStringInfoLength(profile); SetStringInfoLength(profile,GetStringInfoLength(profile)+ GetStringInfoLength(previous_profile)); (void) memmove(GetStringInfoDatum(profile)+ GetStringInfoLength(previous_profile),GetStringInfoDatum(profile), length); (void) memcpy(GetStringInfoDatum(profile), GetStringInfoDatum(previous_profile), GetStringInfoLength(previous_profile)); } status=SetImageProfile(image,name,profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: %s, %.20g bytes",name,(double) length); return(TRUE); } static void SkipInputData(j_decompress_ptr cinfo,long number_bytes) { SourceManager *source; if (number_bytes <= 0) return; source=(SourceManager *) cinfo->src; while (number_bytes > (long) source->manager.bytes_in_buffer) { number_bytes-=(long) source->manager.bytes_in_buffer; (void) FillInputBuffer(cinfo); } source->manager.next_input_byte+=number_bytes; source->manager.bytes_in_buffer-=number_bytes; } static void TerminateSource(j_decompress_ptr cinfo) { (void) cinfo; } static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image) { SourceManager *source; cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager)); source=(SourceManager *) cinfo->src; source->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); source=(SourceManager *) cinfo->src; source->manager.init_source=InitializeSource; source->manager.fill_input_buffer=FillInputBuffer; source->manager.skip_input_data=SkipInputData; source->manager.resync_to_restart=jpeg_resync_to_restart; source->manager.term_source=TerminateSource; source->manager.bytes_in_buffer=0; source->manager.next_input_byte=NULL; source->image=image; } static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info, Image *image) { image->quality=UndefinedCompressionQuality; #if defined(D_PROGRESSIVE_SUPPORTED) if (image->compression == LosslessJPEGCompression) { image->quality=100; (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: 100 (lossless)"); } else #endif { ssize_t j, qvalue, sum; register ssize_t i; /* Determine the JPEG compression quality from the quantization tables. 
*/ sum=0; for (i=0; i < NUM_QUANT_TBLS; i++) { if (jpeg_info->quant_tbl_ptrs[i] != NULL) for (j=0; j < DCTSIZE2; j++) sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j]; } if ((jpeg_info->quant_tbl_ptrs[0] != NULL) && (jpeg_info->quant_tbl_ptrs[1] != NULL)) { ssize_t hash[101] = { 1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645, 632, 623, 613, 607, 600, 594, 589, 585, 581, 571, 555, 542, 529, 514, 494, 474, 457, 439, 424, 410, 397, 386, 373, 364, 351, 341, 334, 324, 317, 309, 299, 294, 287, 279, 274, 267, 262, 257, 251, 247, 243, 237, 232, 227, 222, 217, 213, 207, 202, 198, 192, 188, 183, 177, 173, 168, 163, 157, 153, 148, 143, 139, 132, 128, 125, 119, 115, 108, 104, 99, 94, 90, 84, 79, 74, 70, 64, 59, 55, 49, 45, 40, 34, 30, 25, 20, 15, 11, 6, 4, 0 }, sums[101] = { 32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104, 27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946, 23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998, 16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702, 12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208, 9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846, 1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]+ jpeg_info->quant_tbl_ptrs[1]->quantval[0]+ jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? 
"exact" : "approximate"); break; } } else if (jpeg_info->quant_tbl_ptrs[0] != NULL) { ssize_t hash[101] = { 510, 505, 422, 380, 355, 338, 326, 318, 311, 305, 300, 297, 293, 291, 288, 286, 284, 283, 281, 280, 279, 278, 277, 273, 262, 251, 243, 233, 225, 218, 211, 205, 198, 193, 186, 181, 177, 172, 168, 164, 158, 156, 152, 148, 145, 142, 139, 136, 133, 131, 129, 126, 123, 120, 118, 115, 113, 110, 107, 105, 102, 100, 97, 94, 92, 89, 87, 83, 81, 79, 76, 74, 70, 68, 66, 63, 61, 57, 55, 52, 50, 48, 44, 42, 39, 37, 34, 31, 29, 26, 24, 21, 18, 16, 13, 11, 8, 6, 3, 2, 0 }, sums[101] = { 16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859, 12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679, 9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736, 667, 592, 518, 441, 369, 292, 221, 151, 86, 64, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? "exact" : "approximate"); break; } } } } static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info, Image *image) { char sampling_factor[MaxTextExtent]; switch (jpeg_info->out_color_space) { case JCS_CMYK: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } case JCS_GRAYSCALE: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAYSCALE"); (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor); break; } case JCS_RGB: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", jpeg_info->out_color_space); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } } (void) 
SetImageProperty(image,"jpeg:sampling-factor",sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s", sampling_factor); } static Image *ReadJPEGImage(const ImageInfo *image_info, ExceptionInfo *exception) { char value[MaxTextExtent]; const char *option; ErrorManager error_manager; Image *image; IndexPacket index; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType debug, status; MagickSizeType number_pixels; MemoryInfo *memory_info; register ssize_t i; struct jpeg_decompress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; register JSAMPLE *p; size_t units; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); debug=IsEventLogging(); (void) debug; image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize JPEG parameters. */ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE;
  (void) jpeg_start_decompress(&jpeg_info);
  image->columns=jpeg_info.output_width;
  image->rows=jpeg_info.output_height;
  image->depth=(size_t) jpeg_info.data_precision;
  switch (jpeg_info.out_color_space)
  {
    case JCS_RGB:
    default:
    {
      (void) SetImageColorspace(image,sRGBColorspace);
      break;
    }
    case JCS_GRAYSCALE:
    {
      (void) SetImageColorspace(image,GRAYColorspace);
      break;
    }
    case JCS_YCbCr:
    {
      (void) SetImageColorspace(image,YCbCrColorspace);
      break;
    }
    case JCS_CMYK:
    {
      (void) SetImageColorspace(image,CMYKColorspace);
      break;
    }
  }
  if (IsITUFaxImage(image) != MagickFalse)
    {
      (void) SetImageColorspace(image,LabColorspace);
      jpeg_info.out_color_space=JCS_YCbCr;
    }
  /*
    Re-fetch "jpeg:colors" here: at this point option still holds the last
    value read, "jpeg:fancy-upsampling", so reusing it would size the
    colormap from the wrong string.
  */
  option=GetImageOption(image_info,"jpeg:colors");
  if (option != (const char *) NULL)
    if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse)
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0))
    {
      size_t
        colors;

      colors=(size_t) GetQuantumRange(image->depth)+1;
      if (AcquireImageColormap(image,colors) == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    }
  if (image->debug != MagickFalse)
    {
      if (image->interlace != NoInterlace)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: progressive");
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: nonprogressive");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d",
        (int) jpeg_info.data_precision);
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d",
        (int) jpeg_info.output_width,(int) jpeg_info.output_height);
    }
  JPEGSetImageQuality(&jpeg_info,image);
  JPEGSetImageSamplingFactor(&jpeg_info,image);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    jpeg_info.out_color_space);
  (void) SetImageProperty(image,"jpeg:colorspace",value);
  if (image_info->ping != MagickFalse)
    {
      jpeg_destroy_decompress(&jpeg_info);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      jpeg_destroy_decompress(&jpeg_info);
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  if ((jpeg_info.output_components != 1) &&
      (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4))
    {
      jpeg_destroy_decompress(&jpeg_info);
      ThrowReaderException(CorruptImageError,"ImageTypeNotSupported");
    }
  memory_info=AcquireVirtualMemory((size_t) image->columns,
    jpeg_info.output_components*sizeof(*jpeg_pixels));
  if (memory_info == (MemoryInfo *) NULL)
    {
      jpeg_destroy_decompress(&jpeg_info);
      ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
    }
  jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
  /*
    Convert JPEG pixels to pixel packets.
*/ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { 
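/*
              Adobe-style CMYK JPEGs store the samples inverted, so each
              channel below is flipped with QuantumRange minus the sample;
              the black (K) channel is carried in the colormap index queue.
            */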
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterJPEGImage() adds properties for the JPEG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterJPEGImage method is: % % size_t RegisterJPEGImage(void) % */ ModuleExport size_t RegisterJPEGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char description[] = "Joint Photographic Experts Group JFIF format"; *version='\0'; #if defined(JPEG_LIB_VERSION) (void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION); #endif entry=SetMagickInfo("JPE"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); 
entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("JPS");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PJPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r J P E G I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterJPEGImage() removes format registrations made by the
%  JPEG module from the list of supported formats.
%
%  The format of the UnregisterJPEGImage method is:
%
%      UnregisterJPEGImage(void)
%
*/
ModuleExport void UnregisterJPEGImage(void)
{
  (void) UnregisterMagickInfo("PJPEG");
  (void) UnregisterMagickInfo("JPS");
  (void) UnregisterMagickInfo("JPG");
  (void) UnregisterMagickInfo("JPEG");
  (void) UnregisterMagickInfo("JPE");
}
#if defined(MAGICKCORE_JPEG_DELEGATE)

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e J P E G I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteJPEGImage() writes a JPEG image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the WriteJPEGImage method is:
%
%      MagickBooleanType WriteJPEGImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: The image.
% % */ static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table) { assert(table != (QuantizationTable *) NULL); if (table->slot != (char *) NULL) table->slot=DestroyString(table->slot); if (table->description != (char *) NULL) table->description=DestroyString(table->description); if (table->levels != (unsigned int *) NULL) table->levels=(unsigned int *) RelinquishMagickMemory(table->levels); table=(QuantizationTable *) RelinquishMagickMemory(table); return(table); } static boolean EmptyOutputBuffer(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image, MaxBufferExtent,destination->buffer); if (destination->manager.free_in_buffer != MaxBufferExtent) ERREXIT(cinfo,JERR_FILE_WRITE); destination->manager.next_output_byte=destination->buffer; return(TRUE); } static QuantizationTable *GetQuantizationTable(const char *filename, const char *slot,ExceptionInfo *exception) { char *p, *xml; const char *attribute, *content; double value; register ssize_t i; QuantizationTable *table; size_t length; ssize_t j; XMLTreeInfo *description, *levels, *quantization_tables, *table_iterator; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading quantization tables \"%s\" ...",filename); table=(QuantizationTable *) NULL; xml=FileToString(filename,~0UL,exception); if (xml == (char *) NULL) return(table); quantization_tables=NewXMLTree(xml,exception); if (quantization_tables == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } for (table_iterator=GetXMLTreeChild(quantization_tables,"table"); table_iterator != (XMLTreeInfo *) NULL; table_iterator=GetNextXMLTreeTag(table_iterator)) { attribute=GetXMLTreeAttribute(table_iterator,"slot"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; attribute=GetXMLTreeAttribute(table_iterator,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; } if (table_iterator == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } description=GetXMLTreeChild(table_iterator,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<description>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } levels=GetXMLTreeChild(table_iterator,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<levels>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table)); if (table == (QuantizationTable *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); table->slot=(char *) NULL; table->description=(char *) NULL; table->levels=(unsigned int *) NULL; attribute=GetXMLTreeAttribute(table_iterator,"slot"); if (attribute != (char *) NULL) table->slot=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) table->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels width>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); 
table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->width=StringToUnsignedLong(attribute); if (table->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels width>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->height=StringToUnsignedLong(attribute); if (table->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->divisor=InterpretLocaleValue(attribute,(char **) NULL); if (table->divisor == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent","<levels>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } length=(size_t) table->width*table->height; if (length < 64) length=64; table->levels=(unsigned int *) AcquireQuantumMemory(length, sizeof(*table->levels)); if (table->levels == (unsigned int *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); for (i=0; i < (ssize_t) (table->width*table->height); i++) { table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/ table->divisor+0.5); while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; content=p; } value=InterpretLocaleValue(content,&p); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent","<level> too many values, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } for (j=i; j < 64; j++) table->levels[j]=table->levels[j-1]; quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } static void InitializeDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); destination->manager.next_output_byte=destination->buffer; destination->manager.free_in_buffer=MaxBufferExtent; } static 
void TerminateDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0) { ssize_t count; count=WriteBlob(destination->image,MaxBufferExtent- destination->manager.free_in_buffer,destination->buffer); if (count != (ssize_t) (MaxBufferExtent-destination->manager.free_in_buffer)) ERREXIT(cinfo,JERR_FILE_WRITE); } } static void WriteProfile(j_compress_ptr jpeg_info,Image *image) { const char *name; const StringInfo *profile; MagickBooleanType iptc; register ssize_t i; size_t length, tag_length; StringInfo *custom_profile; /* Save image profile as a APP marker. */ iptc=MagickFalse; custom_profile=AcquireStringInfo(65535L); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { register unsigned char *p; profile=GetImageProfile(image,name); p=GetStringInfoDatum(custom_profile); if (LocaleCompare(name,"EXIF") == 0) { length=GetStringInfoLength(profile); if (length > 65533L) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"ExifProfileSizeExceedsLimit",image->filename); length=65533L; } jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile), (unsigned int) length); } if (LocaleCompare(name,"ICC") == 0) { register unsigned char *p; tag_length=strlen(ICC_PROFILE); p=GetStringInfoDatum(custom_profile); (void) CopyMagickMemory(p,ICC_PROFILE,tag_length); p[tag_length]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L) { length=MagickMin(GetStringInfoLength(profile)-i,65519L); p[12]=(unsigned char) ((i/65519L)+1); p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1); (void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i, length); jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+3)); } } if (((LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse)) { size_t roundup; iptc=MagickTrue; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L) { length=MagickMin(GetStringInfoLength(profile)-i,65500L); roundup=(size_t) (length & 0x01); if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0) { (void) memcpy(p,"Photoshop 3.0 ",14); tag_length=14; } else { (void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24); tag_length=26; p[24]=(unsigned char) (length >> 8); p[25]=(unsigned char) (length & 0xff); } p[13]=0x00; (void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length); if (roundup != 0) p[length+tag_length]='\0'; jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+roundup)); } } if (LocaleCompare(name,"XMP") == 0) { StringInfo *xmp_profile; /* Add namespace to XMP profile. 
*/ xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ "); if (xmp_profile != (StringInfo *) NULL) { if (profile != (StringInfo *) NULL) ConcatenateStringInfo(xmp_profile,profile); GetStringInfoDatum(xmp_profile)[28]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L) { length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER, GetStringInfoDatum(xmp_profile)+i,(unsigned int) length); } xmp_profile=DestroyStringInfo(xmp_profile); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), "%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile)); name=GetNextImageProfile(image); } custom_profile=DestroyStringInfo(custom_profile); } static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image) { DestinationManager *destination; cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager)); destination=(DestinationManager *) cinfo->dest; destination->manager.init_destination=InitializeDestination; destination->manager.empty_output_buffer=EmptyOutputBuffer; destination->manager.term_destination=TerminateDestination; destination->image=image; } static char **SamplingFactorToList(const char *text) { char **textlist; register char *q; register const char *p; register ssize_t i; if (text == (char *) NULL) return((char **) NULL); /* Convert string to an ASCII list. */ textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS, sizeof(*textlist)); if (textlist == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); p=text; for (i=0; i < (ssize_t) MAX_COMPONENTS; i++) { for (q=(char *) p; *q != '\0'; q++) if (*q == ',') break; textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent, sizeof(*textlist[i])); if (textlist[i] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); (void) CopyMagickString(textlist[i],p,(size_t) (q-p+1)); if (*q == '\r') q++; if (*q == '\0') break; p=q+1; } for (i++; i < (ssize_t) MAX_COMPONENTS; i++) textlist[i]=ConstantString("1x1"); return(textlist); } static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, Image *image) { const char *option, *sampling_factor, *value; ErrorManager error_manager; ExceptionInfo *exception; Image *volatile volatile_image; int colorspace, quality; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType status; MemoryInfo *memory_info; register JSAMPLE *q; register ssize_t i; ssize_t y; struct jpeg_compress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; unsigned short scale; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if ((LocaleCompare(image_info->magick,"JPS") == 0) && (image->next != (Image *) NULL)) image=AppendImages(image,MagickFalse,exception); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); /* Initialize JPEG parameters. 
*/ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); volatile_image=image; jpeg_info.client_data=(void *) volatile_image; jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; error_manager.image=volatile_image; memory_info=(MemoryInfo *) NULL; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); (void) CloseBlob(volatile_image); return(MagickFalse); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_compress(&jpeg_info); JPEGDestinationManager(&jpeg_info,image); if ((image->columns != (unsigned int) image->columns) || (image->rows != (unsigned int) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); jpeg_info.image_width=(unsigned int) image->columns; jpeg_info.image_height=(unsigned int) image->rows; jpeg_info.input_components=3; jpeg_info.data_precision=8; jpeg_info.in_color_space=JCS_RGB; switch (image->colorspace) { case CMYKColorspace: { jpeg_info.input_components=4; jpeg_info.in_color_space=JCS_CMYK; break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { jpeg_info.in_color_space=JCS_YCbCr; break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { if (image_info->type == TrueColorType) break; jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; break; } default: { (void) TransformImageColorspace(image,sRGBColorspace); if (image_info->type == TrueColorType) break; if (SetImageGray(image,&image->exception) != MagickFalse) { jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; } break; } } jpeg_set_defaults(&jpeg_info); if (jpeg_info.in_color_space == JCS_CMYK) jpeg_set_colorspace(&jpeg_info,JCS_YCCK); if ((jpeg_info.data_precision != 12) && (image->depth <= 8)) jpeg_info.data_precision=8; else jpeg_info.data_precision=BITS_IN_JSAMPLE; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { /* Set image resolution. */ jpeg_info.write_JFIF_header=TRUE; jpeg_info.X_density=(UINT16) image->x_resolution; jpeg_info.Y_density=(UINT16) image->y_resolution; /* Set image resolution units. */ if (image->units == PixelsPerInchResolution) jpeg_info.density_unit=(UINT8) 1; if (image->units == PixelsPerCentimeterResolution) jpeg_info.density_unit=(UINT8) 2; } jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:optimize-coding"); if (option != (const char *) NULL) jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; else { MagickSizeType length; length=(MagickSizeType) jpeg_info.input_components*image->columns* image->rows*sizeof(JSAMPLE); if (length == (MagickSizeType) ((size_t) length)) { /* Perform optimization only if available memory resources permit it. */ status=AcquireMagickResource(MemoryResource,length); RelinquishMagickResource(MemoryResource,length); jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE; } } #if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED) if ((LocaleCompare(image_info->magick,"PJPEG") == 0) || (image_info->interlace != NoInterlace)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); jpeg_simple_progression(&jpeg_info); } else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: non-progressive"); #else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); #endif quality=92; if ((image_info->compression != LosslessJPEGCompression) && (image->quality <= 100)) { if (image->quality != UndefinedCompressionQuality) quality=(int) image->quality; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g", (double) image->quality); } else { #if !defined(C_LOSSLESS_SUPPORTED) quality=100; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100"); #else if (image->quality < 100) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"LosslessToLossyJPEGConversion",image->filename); else { int point_transform, predictor; predictor=image->quality/100; /* range 1-7 */ point_transform=image->quality % 20; /* range 0-15 */ jpeg_simple_lossless(&jpeg_info,predictor,point_transform); if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Compression: lossless"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Predictor: %d",predictor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Point Transform: %d",point_transform); } } #endif } option=GetImageOption(image_info,"jpeg:extent"); if (option != (const char *) NULL) { Image *jpeg_image; ImageInfo *jpeg_info; jpeg_info=CloneImageInfo(image_info); jpeg_info->blob=NULL; jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image != (Image *) NULL) { MagickSizeType extent; size_t maximum, minimum; /* Search for compression quality that does not exceed image extent. */ jpeg_image->quality=0; extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0); (void) DeleteImageOption(jpeg_info,"jpeg:extent"); (void) DeleteImageArtifact(jpeg_image,"jpeg:extent"); maximum=image_info->quality; if (maximum < 2) maximum=101; for (minimum=2; minimum < maximum; ) { (void) AcquireUniqueFilename(jpeg_image->filename); jpeg_image->quality=minimum+(maximum-minimum+1)/2; (void) WriteJPEGImage(jpeg_info,jpeg_image); if (GetBlobSize(jpeg_image) <= extent) minimum=jpeg_image->quality+1; else maximum=jpeg_image->quality-1; (void) RelinquishUniqueFileResource(jpeg_image->filename); } quality=(int) minimum-1; jpeg_image=DestroyImage(jpeg_image); } jpeg_info=DestroyImageInfo(jpeg_info); } jpeg_set_quality(&jpeg_info,quality,TRUE); #if (JPEG_LIB_VERSION >= 70) option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) { GeometryInfo geometry_info; int flags; /* Set quality scaling for luminance and chrominance separately. 
*/ flags=ParseGeometry(option,&geometry_info); if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0)) { jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int) (geometry_info.rho+0.5)); jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int) (geometry_info.sigma+0.5)); jpeg_default_qtables(&jpeg_info,TRUE); } } #endif colorspace=jpeg_info.in_color_space; value=GetImageOption(image_info,"jpeg:colorspace"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:colorspace"); if (value != (char *) NULL) colorspace=StringToInteger(value); sampling_factor=(const char *) NULL; if (colorspace == jpeg_info.in_color_space) { value=GetImageOption(image_info,"jpeg:sampling-factor"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor == (const char *) NULL) { if (quality >= 90) for (i=0; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } else { char **factors; GeometryInfo geometry_info; MagickStatusType flags; /* Set sampling factor. */ i=0; factors=SamplingFactorToList(sampling_factor); if (factors != (char **) NULL) { for (i=0; i < MAX_COMPONENTS; i++) { if (factors[i] == (char *) NULL) break; flags=ParseGeometry(factors[i],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho; jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma; factors[i]=(char *) RelinquishMagickMemory(factors[i]); } factors=(char **) RelinquishMagickMemory(factors); } for ( ; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } option=GetImageOption(image_info,"jpeg:q-table"); if (option != (const char *) NULL) { QuantizationTable *table; /* Custom quantization tables. 
*/ table=GetQuantizationTable(option,"0",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=0; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=0; jpeg_add_quant_table(&jpeg_info,0,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"1",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=1; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=1; jpeg_add_quant_table(&jpeg_info,1,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"2",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=2; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=2; jpeg_add_quant_table(&jpeg_info,2,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"3",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=3; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=3; jpeg_add_quant_table(&jpeg_info,3,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } } jpeg_start_compress(&jpeg_info,TRUE); if (image->debug != MagickFalse) { if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g", (double) image->depth); if (image->colors != 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: %.20g",(double) image->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: unspecified"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "JPEG data precision: %d",(int) jpeg_info.data_precision); switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); break; } default: break; } switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAY"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor); break; } case sRGBColorspace: case RGBColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image colorspace is RGB"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, 
jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", image->colorspace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } } } /* Write JPEG profiles. */ value=GetImageProperty(image,"comment"); if (value != (char *) NULL) for (i=0; i < (ssize_t) strlen(value); i+=65533L) jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i, (unsigned int) MagickMin((size_t) strlen(value+i),65533L)); if (image->profiles != (void *) NULL) WriteProfile(&jpeg_info,image); /* Convert MIFF to JPEG raster pixels. */ memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.input_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickFalse); } scanline[0]=(JSAMPROW) jpeg_pixels; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (scale == 0) scale=1; if (jpeg_info.data_precision <= 8) { if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p)); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum( GetPixelLuma(image,p))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; 
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelCyan(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelMagenta(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelYellow(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelBlack(indexes+x)))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum( GetPixelLuma(image,p)))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange- GetPixelIndex(indexes+x))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (y == (ssize_t) image->rows) jpeg_finish_compress(&jpeg_info); /* Relinquish resources. */ jpeg_destroy_compress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickTrue); } #endif
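/*
  The stand-alone program below is a minimal, illustrative sketch and not
  part of this coder.  It shows one way two of the options handled above
  reach this module through the public MagickCore API: "jpeg:size", which
  ReadJPEGImage() maps to libjpeg's scale_num/scale_denom for a cheap
  reduced-size decode, and image->quality, which the write path forwards to
  jpeg_set_quality().  It assumes only the ImageMagick 6 MagickCore entry
  points named here; the file names "input.jpg" and "output.jpg" are
  placeholders.
*/
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickCoreGenesis("example",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.jpg",MaxTextExtent);
  /*
    Request a decode no larger than needed; the decoder selects a 1/N
    scaling before jpeg_start_decompress().
  */
  (void) SetImageOption(image_info,"jpeg:size","256x256");
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    CatchException(exception);
  else
    {
      (void) CopyMagickString(image->filename,"output.jpg",MaxTextExtent);
      image->quality=85;  /* read by the quality selection logic above */
      (void) WriteImage(image_info,image);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}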
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <linux/timekeeper_internal.h> #include <linux/pvclock_gtod.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/fpu-internal.h> /* Ugh! */ #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because it's emulated by KVM * - enable LME and LMA per default on 64-bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, {
"nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static int kvm_vcpu_reset(struct kvm_vcpu *vcpu); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read here, and nobody should modify it at this time, * so no lock is needed */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* make sure shared_msrs_global has been updated before it is read */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void *ignore) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); void
kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) { /* TODO: reserved bits check */ kvm_lapic_set_base(vcpu, data); } EXPORT_SYMBOL_GPL(kvm_set_apic_base); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* an exception is already pending: work out how the two combine */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in the hope that instruction re-execution will regenerate the lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest.
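 * (For example, when L1 itself runs a nested L2 guest, ngfn below is an
 * L2 frame number, and mmu->translate_gpa() must first walk L1's paging
 * structures to find the L1 frame that actually backs it.)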
The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. */ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the PAE PDPTRs. Return true if they are all valid. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; } if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) return 1; kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); } if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64
xcr0; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; if (kvm_x86_ops->get_cpl(vcpu) != 0) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; if (xcr0 & ~host_xcr0) return 1; vcpu->arch.xcr0 = xcr0; vcpu->guest_xcr0_loaded = 0; return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) return 1; if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { if (!guest_cpuid_has_pcid(vcpu)) return 1; /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) return 1; } if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; if (((cr4 ^ old_cr4) & pdptr_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) { if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS) return 1; } else if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) return 1; if (is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; } /* * We don't check reserved bits in nonpae mode, because * this isn't enforced, and VMware depends on this. */ } /* * Does the new cr3 value map to physical memory? (Note, we * catch an invalid cr3 even in real-mode, because it would * cause trouble later on when we turn on paging anyway.) * * A real CPU would silently accept an invalid cr3 and would * attempt to use it - with largely undefined (and often hard * to debug) behavior on the guest side. 
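 *
 * A worked example (illustrative numbers only, not from the source): a
 * guest with a single 512 MB memslot starting at gpa 0 covers gfns
 * 0x0 - 0x1ffff, so a write of cr3 = 0x20000000 gives
 *
 *	cr3 >> PAGE_SHIFT == 0x20000,
 *
 * which no memslot covers; gfn_to_memslot() below returns NULL, the
 * function returns 1, and the caller typically turns that into a #GP.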
*/ if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); vcpu->arch.mmu.new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK); } static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); bool kvm_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); u64 data; int err; err = kvm_pmu_read_pmc(vcpu, ecx, &data); if (err) return err; kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); return err; } EXPORT_SYMBOL_GPL(kvm_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. 
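 *
 * For illustration, userspace enumerates this list through the
 * KVM_GET_MSR_INDEX_LIST ioctl on the /dev/kvm fd (handled in
 * kvm_arch_dev_ioctl() below). A hedged sketch, error handling omitted:
 *
 *	struct kvm_msr_list *list =
 *		malloc(sizeof(*list) + 64 * sizeof(__u32));
 *	list->nmsrs = 64;			// capacity on input
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 *	// on success nmsrs holds the real count, indices[] the MSRs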
*/ #define KVM_SAVE_MSRS_BEGIN 10 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; static const u32 emulated_msrs[] = { MSR_IA32_TSC_ADJUST, MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (efer & efer_reserved_bits) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return 1; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return 1; } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return kvm_x86_ops->set_msr(vcpu, msr); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct msr_data msr; msr.data = *data; msr.index = index; msr.host_initiated = true; return kvm_set_msr(vcpu, &msr); } #ifdef CONFIG_X86_64 struct pvclock_gtod_data { seqcount_t seq; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; } clock; /* open coded 'struct timespec' */ u64 monotonic_time_snsec; time_t monotonic_time_sec; }; static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode; vdata->clock.cycle_last = tk->clock->cycle_last; vdata->clock.mask = tk->clock->mask; vdata->clock.mult = tk->mult; vdata->clock.shift = tk->shift; vdata->monotonic_time_sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; vdata->monotonic_time_snsec = tk->xtime_nsec + (tk->wall_to_monotonic.tv_nsec << tk->shift); while (vdata->monotonic_time_snsec >= (((u64)NSEC_PER_SEC) << tk->shift)) { vdata->monotonic_time_snsec -= ((u64)NSEC_PER_SEC) << tk->shift; vdata->monotonic_time_sec++; } write_seqcount_end(&vdata->seq); } #endif static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current
wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. Guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { struct timespec ts; WARN_ON(preemptible()); ktime_get_ts(&ts); monotonic_to_bootbased(&ts); return timespec_to_ns(&ts); } #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); } static u32 adjust_tsc_khz(u32 khz, s32 ppm) { u64 v = (u64)khz * (1000000 + ppm); do_div(v, 1000000); return v; } static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { u32 thresh_lo, thresh_hi; int use_scaling = 0; /* Compute a scale to convert nanoseconds into TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); vcpu->arch.virtual_tsc_khz = this_tsc_khz; /* * Compute the variation in TSC rate which is acceptable * within the range of tolerance and decide if the * rate being applied is within those bounds of the hardware * rate. If so, no scaling or compensation need be done.
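 *
 * A worked example with the default tolerance (illustrative numbers
 * only): for a host tsc_khz of 2000000 (2 GHz) and tsc_tolerance_ppm
 * of 250, adjust_tsc_khz() above gives
 *
 *	thresh_lo = 2000000 * (1000000 - 250) / 1000000 = 1999500 kHz
 *	thresh_hi = 2000000 * (1000000 + 250) / 1000000 = 2000500 kHz
 *
 * so a guest requesting 2100000 kHz falls outside the window and
 * scaling is requested from the backend below.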
*/ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); tsc += vcpu->arch.this_tsc_write; return tsc; } void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif } static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("idivl %2; xor %%edx, %%edx" : "=A"(usdiff) : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. */ if (usdiff < USEC_PER_SEC && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); data += delta; offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } matched = true; } else { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. 
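 *
 * An illustrative timeline (not from a trace): VCPU0 writes TSC value X
 * and opens generation N. If VCPU1 then writes a value within one
 * second of virtual cycle time at the same virtual_tsc_khz, it matches,
 * joins generation N, and nr_vcpus_matched_tsc grows; any out-of-window
 * write instead opens generation N + 1 and resets the match count.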
*/ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = data; kvm->arch.cur_tsc_offset = offset; matched = false; pr_debug("kvm: new tsc generation %u, clock %llu\n", kvm->arch.cur_tsc_generation, data); } /* * We also track the most recent recorded kHz, write and time to * allow the matching interval to be extended at each write. */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_guest_tsc = data; /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) update_ia32_tsc_adjust_msr(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); spin_lock(&kvm->arch.pvclock_gtod_sync_lock); if (matched) kvm->arch.nr_vcpus_matched_tsc++; else kvm->arch.nr_vcpus_matched_tsc = 0; kvm_track_tsc_matching(vcpu); spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); } EXPORT_SYMBOL_GPL(kvm_write_tsc); #ifdef CONFIG_X86_64 static cycle_t read_tsc(void) { cycle_t ret; u64 last; /* * Empirically, a fence (of type that depends on the CPU) * before rdtsc is enough to ensure that rdtsc is ordered * with respect to loads. The various CPU manuals are unclear * as to whether rdtsc can be reordered with later loads, * but no one has ever seen it happen. */ rdtsc_barrier(); ret = (cycle_t)vget_cycles(); last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; /* * GCC likes to generate cmov here, but this branch is extremely * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function * ever gets inlined it will generate worse code. */ asm volatile (""); return last; } static inline u64 vgettsc(cycle_t *cycle_now) { long v; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; *cycle_now = read_tsc(); v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask; return v * gtod->clock.mult; } static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) { unsigned long seq; u64 ns; int mode; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; ts->tv_nsec = 0; do { seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; ns += vgettsc(cycle_now); ns >>= gtod->clock.shift; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); timespec_add_ns(ts, ns); return mode; } /* returns true if host is using tsc clocksource */ static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) { struct timespec ts; /* checked again under seqlock below */ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) return false; if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC) return false; monotonic_to_bootbased(&ts); *kernel_ns = timespec_to_ns(&ts); return true; } #endif /* * * Assuming a stable TSC across physical CPUs, and a stable TSC * across virtual CPUs, the following condition is possible. * Each numbered line represents an event visible to both * CPUs at the next numbered event. * * "timespecX" represents host monotonic time.
"tscX" represents * RDTSC value. * * VCPU0 on CPU0 | VCPU1 on CPU1 * * 1. read timespec0,tsc0 * 2. | timespec1 = timespec0 + N * | tsc1 = tsc0 + M * 3. transition to guest | transition to guest * 4. ret0 = timespec0 + (rdtsc - tsc0) | * 5. | ret1 = timespec1 + (rdtsc - tsc1) * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) * * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: * * - ret0 < ret1 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) * ... * - 0 < N - M => M < N * * That is, when timespec0 != timespec1, M < N. Unfortunately that is not * always the case (the difference between two distinct xtime instances * might be smaller than the difference between corresponding TSC reads, * when updating guest vcpus' pvclock areas). * * To avoid that problem, do not allow visibility of distinct * system_timestamp/tsc_timestamp values simultaneously: use a master * copy of host monotonic time values. Update that master copy * in lockstep. * * Rely on synchronization of host TSCs and guest TSCs for monotonicity. * */ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource & vcpus_matched; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif } static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; void *shared_kaddr; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; struct pvclock_vcpu_time_info *guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ spin_lock(&ka->pvclock_gtod_sync_lock); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); } tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->time_page) return 0; /* * Time as measured by the TSC may go backwards when resetting the base * tsc_timestamp.
The reason for this is that the TSC resolution is * higher than the resolution of the other clock scales. Thus, many * possible measurements of the TSC correspond to one measurement of any * other clock, and so a spread of values is possible. This is not a * problem for the computation of the nanosecond clock; with TSC rates * around 1GHz, there can only be a few cycles which correspond to one * nanosecond value, and any path through this code will inevitably * take longer than that. However, with the kernel_ns value itself, * the precision may be much lower, down to HZ granularity. If the * first sampling of TSC against kernel_ns ends in the low part of the * range, and the second in the high end of the range, we can get: * * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new * * As the sampling errors potentially range in the thousands of cycles, * it is possible such a time value has already been observed by the * guest. To protect against this, we must compute the system time as * observed by the guest and ensure the new system time is greater. */ max_kernel_ns = 0; if (vcpu->hv_clock.tsc_timestamp) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, vcpu->hv_clock.tsc_to_system_mul, vcpu->hv_clock.tsc_shift); max_kernel_ns += vcpu->last_kernel_ns; } if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } /* with a master <monotonic time, tsc value> tuple, * pvclock clock reads always increase at the (scaled) rate * of guest TSC - no need to deal with sampling errors. */ if (!use_master_clock) { if (max_kernel_ns > kernel_ns) kernel_ns = max_kernel_ns; } /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_kernel_ns = kernel_ns; vcpu->last_guest_tsc = tsc_timestamp; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. */ vcpu->hv_clock.version += 2; shared_kaddr = kmap_atomic(vcpu->time_page); guest_hv_clock = shared_kaddr + vcpu->time_offset; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; vcpu->pvclock_set_guest_stopped_request = false; } /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; vcpu->hv_clock.flags = pvclock_flags; memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); kunmap_atomic(shared_kaddr); mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); return 0; } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ...
0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ return valid_mtrr_type(data & 0xff); } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL. * Some Linux kernels, though, clear bit 10 in bank 4 to * work around a BIOS/GART TBL issue on AMD K8s; ignore * this to avoid an uncaught #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ?
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; } default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are reserved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void 
record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { bool pr = false; u32 msr = msr_info->index; u64 data = msr_info->data; switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ if (data != 0) { vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: kvm_set_apic_base(vcpu, data); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); } vcpu->arch.ia32_tsc_adjust_msr = data; } break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... */ if (!(data & 1)) break; /* ...but clean it before doing the actual write */ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); if (is_error_page(vcpu->arch.time_page)) vcpu->arch.time_page = NULL; break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS)) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_KVM_PV_EOI_EN: if (kvm_lapic_enable_pv_eoi(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... 
MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr, data); if (pr || data != 0) vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to specify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. */ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.status = data; break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr, data); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
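 *
 * Illustrative call pattern (a hedged sketch, not a required use):
 *
 *	u64 efer;
 *
 *	if (kvm_get_msr(vcpu, MSR_EFER, &efer))
 *		return 1;	// unhandled MSR; callers typically inject #GP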
*/ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) if (v == vcpu) data = r; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_PERFCTR0: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 
0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: data = (u64)vcpu->arch.ia32_tsc_adjust_msr; break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: data = vcpu->arch.pv_eoi.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.status; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. 
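 *
 * Illustrative use (a hedged sketch; do_get_msr is assumed here as the
 * read-side counterpart of do_set_msr above):
 *
 *	struct kvm_msrs msrs = { .nmsrs = 2 };
 *	struct kvm_msr_entry ent[2] = {
 *		{ .index = MSR_IA32_SYSENTER_CS },
 *		{ .index = MSR_EFER },
 *	};
 *	int n = __msr_io(vcpu, &msrs, ent, do_get_msr);	// n == 2 if both succeed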
*/ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. */ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_PCI_2_3: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: case KVM_CAP_IRQFD_RESAMPLE: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) 
goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.iommu_domain && !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); vcpu->arch.tsc_offset_adjustment = 0; set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); } if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } /* * On a host with synchronized TSC, there is no need to update * kvmclock on vcpu->cpu migration */ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= 
KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); 
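/* NMI and SIPI state below are restored selectively: nmi.injected is always taken, while nmi.pending and sipi_vector are applied only when the corresponding KVM_VCPUEVENT_VALID_* flag is set, so userspace can update part of the event state without clobbering the rest. */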
vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) vcpu->arch.sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, xstate_size); else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, xstate_size); else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[0].value); break; } if (r) r = -EINVAL; return r; } /* * kvm_set_guest_paused() indicates to the guest kernel that it has been * stopped by the hypervisor. This function will be called from the host only. * EINVAL is returned when the host attempts to set the flag for a guest that * does not support pv clocks. 
*/ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.time_page) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) return PTR_ERR(u.lapic); r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = 0; kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { 
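/* Illustrative userspace counterpart of this ioctl (a sketch, not part of this file; assumes vcpu_fd is an open vcpu file descriptor): struct kvm_debugregs dbg; if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) == 0) printf("dr6=%llx dr7=%llx\n", (unsigned long long)dbg.dr6, (unsigned long long)dbg.dr7); The kernel fills a stack copy below and hands it back with copy_to_user(). */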
struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); if (IS_ERR(u.xsave)) return PTR_ERR(u.xsave); r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); if (IS_ERR(u.xcrs)) return PTR_ERR(u.xcrs); r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } case KVM_SET_TSC_KHZ: { u32 user_tsc_khz; r = -EINVAL; user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; if (user_tsc_khz == 0) user_tsc_khz = tsc_khz; kvm_set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { r = vcpu->arch.virtual_tsc_khz; goto out; } case KVM_KVMCLOCK_CTRL: { r = kvm_set_guest_paused(vcpu); goto out; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, 
&chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /** * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot * @kvm: kvm instance * @log: slot id and address to which we copy the log * * We need to keep it in mind that VCPU threads can write to the bitmap * concurrently. So, to avoid losing data, we keep the following order for * each bit: * * 1. Take a snapshot of the bit and clear it if needed. * 2. Write protect the corresponding page. * 3. Flush TLB's if needed. * 4. Copy the snapshot to the userspace. * * Between 2 and 3, the guest may write to the page using the remaining TLB * entry. This is not a problem because the page will be reported dirty at * step 4 using the snapshot taken before and step 3 ensures that successive * writes will be logged for the next call. 
*/ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; struct kvm_memory_slot *memslot; unsigned long n, i; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; bool is_dirty = false; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); dirty_bitmap = memslot->dirty_bitmap; r = -ENOENT; if (!dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); memset(dirty_bitmap_buffer, 0, n); spin_lock(&kvm->mmu_lock); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; if (!dirty_bitmap[i]) continue; is_dirty = true; mask = xchg(&dirty_bitmap[i], 0); dirty_bitmap_buffer[i] = mask; offset = i * BITS_PER_LONG; kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); } if (is_dirty) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event) { if (!irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level); return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. */ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -EINVAL; if (atomic_read(&kvm->online_vcpus)) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: 
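/* Common exit path for KVM_CREATE_PIT and KVM_CREATE_PIT2: slots_lock is dropped whether the PIT was created (r == 0), already existed (-EEXIST), or allocation failed (-ENOMEM). */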
mutex_unlock(&kvm->slots_lock); break; case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. 
KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { gpa_t t_gpa; struct x86_exception exception; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access | PFERR_FETCH_MASK, exception); } int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); return 1; } struct read_write_emulator_ops { int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, int bytes); int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val); int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); bool write; }; static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); } static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); } static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } 
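/* The *_exit_mmio() helpers below are the hand-off-to-userspace halves of the read_write_emulator_ops tables: when no in-kernel device claims the address, a read returns X86EMUL_IO_NEEDED so the access is completed through a KVM_EXIT_MMIO round trip, while a write copies the first fragment into vcpu->run and lets emulation continue. */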
static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); return X86EMUL_IO_NEEDED; } static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; } static const struct read_write_emulator_ops read_emultor = { .read_write_prepare = read_prepare, .read_write_emulate = read_emulate, .read_write_mmio = vcpu_mmio_read, .read_write_exit_mmio = read_exit_mmio, }; static const struct read_write_emulator_ops write_emultor = { .read_write_emulate = write_emulate, .read_write_mmio = write_mmio, .read_write_exit_mmio = write_exit_mmio, .write = true, }; static int emulator_read_write_onepage(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu, const struct read_write_emulator_ops *ops) { gpa_t gpa; int handled, ret; bool write = ops->write; struct kvm_mmio_fragment *frag; ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if (ret) goto mmio; if (ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? */ handled = ops->read_write_mmio(vcpu, gpa, bytes, val); if (handled == bytes) return X86EMUL_CONTINUE; gpa += handled; bytes -= handled; val += handled; WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; frag->gpa = gpa; frag->data = val; frag->len = bytes; return X86EMUL_CONTINUE; } int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, const struct read_write_emulator_ops *ops) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; int rc; if (ops->read_write_prepare && ops->read_write_prepare(vcpu, val, bytes)) return X86EMUL_CONTINUE; vcpu->mmio_nr_fragments = 0; /* Crossing a page boundary? 
*/ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int now; now = -addr & ~PAGE_MASK; rc = emulator_read_write_onepage(addr, val, now, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } rc = emulator_read_write_onepage(addr, val, bytes, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; if (!vcpu->mmio_nr_fragments) return rc; gpa = vcpu->mmio_fragments[0].gpa; vcpu->mmio_needed = 1; vcpu->mmio_cur_fragment = 0; vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = gpa; return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); } static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, val, bytes, exception, &read_emultor); } int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, (void *)val, bytes, exception, &write_emultor); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* a guest's cmpxchg8b has to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) goto emul_write; kaddr = kmap_atomic(page); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; kvm_mmu_pte_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(ctxt, addr, new, bytes, exception); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in-kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); return r; } static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, unsigned short port, void *val, unsigned int count, bool in) { trace_kvm_pio(!in, port, size, count); vcpu->arch.pio.port = port; vcpu->arch.pio.in = in; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 1; }
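/* No in-kernel device claimed the port: build a KVM_EXIT_IO exit so userspace completes the access. The bytes themselves travel through the shared pio page mapped at KVM_PIO_PAGE_OFFSET in the vcpu's run area, as the data_offset set below indicates. */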
vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int ret; if (vcpu->arch.pio.count) goto data_avail; ret = emulator_pio_in_out(vcpu, size, port, val, count, true); if (ret) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); vcpu->arch.pio.count = 0; return 1; } return 0; } static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); memcpy(vcpu->arch.pio_data, val, size * count); return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) { kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); } int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = kvm_read_cr3(vcpu); break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = kvm_set_cr8(vcpu, val); break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val) { kvm_set_rflags(emul_to_vcpu(ctxt), val); } static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { 
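/* Like emulator_get_gdt() above, a thin forward to the vendor backend. */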
kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( struct x86_emulate_ctxt *ctxt, int seg) { return get_segment_base(emul_to_vcpu(ctxt), seg); } static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg) { struct kvm_segment var; kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; if (var.unusable) { memset(desc, 0, sizeof(*desc)); return false; } if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); #ifdef CONFIG_X86_64 if (base3) *base3 = var.base >> 32; #endif desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_segment var; var.selector = selector; var.base = get_desc_base(desc); #ifdef CONFIG_X86_64 var.base |= ((u64)base3) << 32; #endif var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.present = desc->p; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata) { return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data) { struct msr_data msr; msr.data = data; msr.index = msr_index; msr.host_initiated = false; return kvm_set_msr(emul_to_vcpu(ctxt), &msr); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata) { return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); } static void emulator_halt(struct x86_emulate_ctxt *ctxt) { emul_to_vcpu(ctxt)->arch.halt_request = 1; } static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) { preempt_disable(); kvm_load_guest_fpu(emul_to_vcpu(ctxt)); /* * CR0.TS may reference the host fpu state, not the guest fpu state, * so it may be clear at this point. 
*/ clts(); } static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) { preempt_enable(); } static int emulator_intercept(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) { kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); } static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { return kvm_register_read(emul_to_vcpu(ctxt), reg); } static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) { kvm_register_write(emul_to_vcpu(ctxt), reg, val); } static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, .invlpg = emulator_invlpg, .pio_in_emulated = emulator_pio_in_emulated, .pio_out_emulated = emulator_pio_out_emulated, .get_segment = emulator_get_segment, .set_segment = emulator_set_segment, .get_cached_segment_base = emulator_get_cached_segment_base, .get_gdt = emulator_get_gdt, .get_idt = emulator_get_idt, .set_gdt = emulator_set_gdt, .set_idt = emulator_set_idt, .get_cr = emulator_get_cr, .set_cr = emulator_set_cr, .set_rflags = emulator_set_rflags, .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, .set_msr = emulator_set_msr, .get_msr = emulator_get_msr, .read_pmc = emulator_read_pmc, .halt = emulator_halt, .wbinvd = emulator_wbinvd, .fix_hypercall = emulator_fix_hypercall, .get_fpu = emulator_get_fpu, .put_fpu = emulator_put_fpu, .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) { u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); /* * an sti; sti; sequence only disable interrupts for the first * instruction. So, if the last instruction, be it emulated or * not, left the system with the INT_STI flag enabled, it * means that the last instruction is an sti. We should not * leave the flag on in this case. The same goes for mov ss */ if (!(int_shadow & mask)) kvm_x86_ops->set_interrupt_shadow(vcpu, mask); } static void inject_emulated_exception(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; if (ctxt->exception.vector == PF_VECTOR) kvm_propagate_fault(vcpu, &ctxt->exception); else if (ctxt->exception.error_code_valid) kvm_queue_exception_e(vcpu, ctxt->exception.vector, ctxt->exception.error_code); else kvm_queue_exception(vcpu, ctxt->exception.vector); } static void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->twobyte, 0, (void *)&ctxt->_regs - (void *)&ctxt->twobyte); ctxt->fetch.start = 0; ctxt->fetch.end = 0; ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.pos = 0; ctxt->mem_read.end = 0; } static void init_emulate_ctxt(struct kvm_vcpu *vcpu) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int cs_db, cs_l; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); ctxt->eflags = kvm_get_rflags(vcpu); ctxt->eip = kvm_rip_read(vcpu); ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : cs_l ? X86EMUL_MODE_PROT64 : cs_db ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; ctxt->guest_mode = is_guest_mode(vcpu); init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ctxt->op_bytes = 2; ctxt->ad_bytes = 2; ctxt->_eip = ctxt->eip + inc_eip; ret = emulate_int_real(ctxt, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; ctxt->eip = ctxt->_eip; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = 0; else vcpu->arch.interrupt.pending = false; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int handle_emulation_failure(struct kvm_vcpu *vcpu) { int r = EMULATE_DONE; ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); if (!is_guest_mode(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; r = EMULATE_FAIL; } kvm_queue_exception(vcpu, UD_VECTOR); return r; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, bool write_fault_to_shadow_pgtable) { gpa_t gpa = cr2; pfn_t pfn; if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only * write accesses need to be emulated. */ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); /* * If the mapping is invalid in the guest, let the cpu retry * it to generate the fault. */ if (gpa == UNMAPPED_GVA) return true; } /* * Do not retry the unhandleable instruction if it faults on * readonly host memory, otherwise it will go into an infinite loop: * retry instruction -> write #PF -> emulation fail -> retry * instruction -> ... */ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the instruction failed on the error pfn, it cannot be fixed, * so report the error to userspace. */ if (is_error_noslot_pfn(pfn)) return false; kvm_release_pfn_clean(pfn); /* Instructions are well emulated on a direct mmu. */ if (vcpu->arch.mmu.direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; spin_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } /* * If emulation failed due to an access to a shadowed page table, * try to unshadow the page and re-enter the * guest to let the CPU execute the instruction. */ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the access faults on its page table, it cannot * be fixed by unprotecting the shadow page, and it should * be reported to userspace. */ return !write_fault_to_shadow_pgtable; } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, unsigned long cr2, int emulation_type) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long last_retry_eip, last_retry_addr, gpa = cr2; last_retry_eip = vcpu->arch.last_retry_eip; last_retry_addr = vcpu->arch.last_retry_addr; /* * If the emulation is caused by #PF and it is a non-page-table- * writing instruction, it means the VM exit is caused by shadow- * page protection; we can zap the shadow page and retry this * instruction directly. * * Note: if the guest uses a non-page-table modifying instruction * on the PDE that points to the instruction, then we will unmap * the instruction and go to an infinite loop.
So, we cache the * last retried eip and the last fault address, if we meet the eip * and the address again, we can break out of the potential infinite * loop. */ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; if (!(emulation_type & EMULTYPE_RETRY)) return false; if (x86_page_table_writing_insn(ctxt)) return false; if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) return false; vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; if (!vcpu->arch.mmu.direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } static int complete_emulated_mmio(struct kvm_vcpu *vcpu); static int complete_emulated_pio(struct kvm_vcpu *vcpu); int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. */ vcpu->arch.write_fault_to_shadow_pgtable = false; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->perm_ok = false; ctxt->only_vendor_specific_insn = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; if (reexecute_instruction(vcpu, cr2, write_fault_to_spt)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, ctxt->_eip); return EMULATE_DONE; } if (retry_instruction(ctxt, cr2, emulation_type)) return EMULATE_DONE; /* this is needed for vmware backdoor interface to work since it changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; emulator_invalidate_register_cache(ctxt); } restart: r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2, write_fault_to_spt)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } if (ctxt->have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) vcpu->arch.pio.count = 0; else { writeback = false; vcpu->arch.complete_userspace_io = complete_emulated_pio; } r = EMULATE_DO_MMIO; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; r = EMULATE_DO_MMIO; vcpu->arch.complete_userspace_io = complete_emulated_mmio; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; if (writeback) { toggle_interruptibility(vcpu, ctxt->interruptibility); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; return r; } EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); /* do not return to emulator after return from userspace 
*/ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __this_cpu_write(cpu_tsc_khz, 0); } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __this_cpu_write(cpu_tsc_khz, khz); } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt- * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); raw_spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } raw_spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. We must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk that the guest sees * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context), send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values.
*/ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; } static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; static int kvmclock_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, tsc_bad, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block kvmclock_cpu_notifier_block = { .notifier_call = kvmclock_cpu_notifier, .priority = -INT_MAX }; static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; register_hotcpu_notifier(&kvmclock_cpu_notifier_block); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); cpufreq_get_policy(&policy, cpu); if (policy.cpuinfo.max_freq) max_tsc_khz = policy.cpuinfo.max_freq; put_cpu(); #endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); for_each_online_cpu(cpu) smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); int kvm_is_in_guest(void) { return __this_cpu_read(current_vcpu) != NULL; } static int kvm_is_user_mode(void) { int user_mode = 3; if (__this_cpu_read(current_vcpu)) user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); return user_mode != 0; } static unsigned long kvm_get_guest_ip(void) { unsigned long ip = 0; if (__this_cpu_read(current_vcpu)) ip = kvm_rip_read(__this_cpu_read(current_vcpu)); return ip; } static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, }; void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, vcpu); } EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, NULL); } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); static void kvm_set_mmio_spte_mask(void) { u64 mask; int maxphyaddr = boot_cpu_data.x86_phys_bits; /* * Set the reserved bits and the present bit of an paging-structure * entry to generate page fault with PFER.RSV = 1. */ mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr; mask |= 1ull; #ifdef CONFIG_X86_64 /* * If reserved bit is not supported, clear the present bit to disable * mmio page fault. */ if (maxphyaddr == 52) mask &= ~1ull; #endif kvm_mmu_set_mmio_spte_mask(mask); } #ifdef CONFIG_X86_64 static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; raw_spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); atomic_set(&kvm_guest_has_master_clock, 0); raw_spin_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); /* * Notification about pvclock gtod data update. 
*/ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, void *priv) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; struct timekeeper *tk = priv; update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not * use, TSC clocksource */ if (gtod->clock.vclock_mode != VCLOCK_TSC && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); return 0; } static struct notifier_block pvclock_gtod_notifier = { .notifier_call = pvclock_gtod_notify, }; #endif int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = -ENOMEM; shared_msrs = alloc_percpu(struct kvm_shared_msrs); if (!shared_msrs) { printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n"); goto out; } r = kvm_mmu_module_init(); if (r) goto out_free_percpu; kvm_set_mmio_spte_mask(); kvm_init_msr_list(); kvm_x86_ops = ops; kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); kvm_lapic_init(); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); #endif return 0; out_free_percpu: free_percpu(shared_msrs); out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); #endif kvm_x86_ops = NULL; kvm_mmu_module_exit(); free_percpu(shared_msrs); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; int cs_db, cs_l; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); 
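/*
 * Descriptive note (not in the original source), summarizing the unpacking
 * just performed above per the Hyper-V hypercall ABI: bits 0-15 of the
 * input value carry the call code, bit 16 the "fast" flag (register-based
 * arguments), bits 32-43 the rep count and bits 48-59 the rep start index.
 * In long mode the input value, input GPA and output GPA arrive in RCX,
 * RDX and R8; in 32-bit mode each is split across a register pair
 * (EDX:EAX, EBX:ECX and EDI:ESI respectively).
 */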
switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); if (!is_long_mode(vcpu)) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; default: ret = -KVM_ENOSYS; break; } out: kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); /* * Blow out the MMU to ensure that no other VCPU has an active mapping * to ensure that the updated hypercall appears atomically across all * VCPUs. */ kvm_mmu_zap_all(vcpu->kvm); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. 
*/ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static int vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) return 0; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); if (is_error_page(page)) return -EFAULT; vcpu->arch.apic->vapic_page = page; return 0; } static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static void inject_pending_event(struct kvm_vcpu *vcpu) { /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } } static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
*/ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static void kvm_gen_update_masterclock(struct kvm *kvm) { #ifdef CONFIG_X86_64 int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ pvclock_update_vm_gtod_copy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } static void update_eoi_exitmap(struct kvm_vcpu *vcpu) { u64 eoi_exit_bitmap[4]; memset(eoi_exit_bitmap, 0, 32); kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap); kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); } static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = 0; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) kvm_gen_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); req_immediate_exit = kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu)) update_eoi_exitmap(vcpu); } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { inject_pending_event(vcpu); /* enable NMI/IRQ window open exits if needed */ if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { /* * Update architecture specific hints for APIC * virtual interrupt delivery. */ if (kvm_x86_ops->hwapic_irr_update) kvm_x86_ops->hwapic_irr_update(vcpu, kvm_lapic_find_highest_irr(vcpu)); update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } r = kvm_mmu_reload(vcpu); if (unlikely(r)) { goto cancel_injection; } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); vcpu->mode = IN_GUEST_MODE; /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
*/ smp_mb(); local_irq_disable(); if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests || need_resched() || signal_pending(current)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); preempt_enable(); r = 1; goto cancel_injection; } srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. * But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } if (unlikely(vcpu->arch.tsc_always_catchup)) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->arch.apic_attention) kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); return r; cancel_injection: kvm_x86_ops->cancel_injection(vcpu); if (unlikely(vcpu->arch.apic_attention)) kvm_lapic_sync_from_vapic(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, vcpu->arch.sipi_vector); kvm_lapic_reset(vcpu); r = kvm_vcpu_reset(vcpu); if (r) return r; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); r = vapic_enter(vcpu); if (r) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); return r; } r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_SIPI_RECEIVED: default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_resched(vcpu); vcpu->srcu_idx = 
srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); vapic_exit(vcpu); return r; } static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } static int complete_emulated_pio(struct kvm_vcpu *vcpu) { BUG_ON(!vcpu->arch.pio.count); return complete_emulated_io(vcpu); } /* * Implements the following, as a state machine: * * read: * for each fragment * for each mmio piece in the fragment * write gpa, len * exit * copy data * execute insn * * write: * for each fragment * for each mmio piece in the fragment * write gpa, len * copy data * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } if (unlikely(vcpu->arch.complete_userspace_io)) { int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; vcpu->arch.complete_userspace_io = NULL; r = cui(vcpu); if (r <= 0) goto out; } else WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Registers state needs to be copied * back from emulation context to vcpu. 
Userspace shouldn't do * that usually, but some bad designed PV devices (vmware * backdoor interface) need this to work */ emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); #ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 
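/*
 * Descriptive note (not in the original source): at most one pending
 * event is reported here, and only a hard (non-soft) interrupt is latched
 * into interrupt_bitmap; kvm_arch_vcpu_ioctl_set_sregs() below recovers
 * it with find_first_bit() and re-queues it via kvm_queue_interrupt().
 */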
if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (ret) return EMULATE_FAIL; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = KVM_NR_INTERRUPTS; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. 
*/ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. */ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; } kvm_update_dr7(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->update_db_bp_intercept(vcpu); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. */ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); __kernel_fpu_end(); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvmclock_reset(vcpu); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; r = vcpu_load(vcpu); if (r) return r; r = kvm_vcpu_reset(vcpu); if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { int r; struct msr_data msr; r = vcpu_load(vcpu); if (r) return r; msr.data = 0x0; msr.index = MSR_IA32_TSC; msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { int r; vcpu->arch.apf.msr_val = 0; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } static int kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; return kvm_x86_ops->vcpu_reset(vcpu); } int kvm_arch_hardware_enable(void *garbage) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; int ret; u64 local_tsc; u64 max_tsc = 0; bool stable, backwards_tsc = false; kvm_shared_msr_cpu_online(); ret = kvm_x86_ops->hardware_enable(garbage); if (ret != 0) return ret; local_tsc = native_read_tsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!stable && vcpu->cpu == smp_processor_id()) set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); if (stable && vcpu->arch.last_host_tsc > local_tsc) { backwards_tsc = true; if (vcpu->arch.last_host_tsc > max_tsc) max_tsc = vcpu->arch.last_host_tsc; } } } /* * Sometimes, even reliable TSCs go backwards. This happens on * platforms that reset TSC during suspend or hibernate actions, but * maintain synchronization. We must compensate. Fortunately, we can * detect that condition here, which happens early in CPU bringup, * before any KVM threads can be running. Unfortunately, we can't * bring the TSCs fully up to date with real time, as we aren't yet far * enough into CPU bringup that we know how much real time has actually * elapsed; our helper function, get_kernel_ns() will be using boot * variables that haven't been updated yet. 
* * So we simply find the maximum observed TSC above, then record the * adjustment to TSC in each VCPU. When the VCPU later gets loaded, * the adjustment will be applied. Note that we accumulate * adjustments, in case multiple suspend cycles happen before some VCPU * gets a chance to run again. In the event that no KVM threads get a * chance to run, we will miss the entire elapsed period, as we'll have * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may * lose cycle time. This isn't too big a deal, since the loss will be * uniform across all VCPUs (not to mention the scenario is extremely * unlikely). It is possible that a second hibernate recovery happens * much faster than a first, causing the observed TSC here to be * smaller; this would require additional padding adjustment, which is * why we set last_host_tsc to the local tsc observed here. * * N.B. - this code below runs only on platforms with reliable TSC, * as that is the only way backwards_tsc is set above. Also note * that this runs for ALL vcpus, which is not a bug; all VCPUs should * have the same delta_cyc adjustment applied if backwards_tsc * is detected. Note further, this adjustment is only done once, * as we reset last_host_tsc on all VCPUs to stop this from being * called multiple times (one for each physical CPU bringup). * * Platforms with unreliable TSCs don't have to deal with this; they * will be compensated by the logic in vcpu_load, which sets the TSC to * catchup mode. This will catch up all VCPUs to real time, but cannot * guarantee that they stay in perfect synchronization. */ if (backwards_tsc) { u64 delta_cyc = max_tsc - local_tsc; list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.tsc_offset_adjustment += delta_cyc; vcpu->arch.last_host_tsc = local_tsc; set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); } /* * We have to disable TSC offset matching: if you were * booting a VM while issuing an S4 host suspend, * you may have a problem. Solving this issue is * left as an exercise to the reader.
*/ kvm->arch.last_tsc_nsec = 0; kvm->arch.last_tsc_write = 0; } } return 0; } void kvm_arch_hardware_disable(void *garbage) { kvm_x86_ops->hardware_disable(garbage); drop_user_return_notifiers(garbage); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } struct static_key kvm_no_apic_vcpu __read_mostly; int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); kvm_set_tsc_khz(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } else static_key_slow_inc(&kvm_no_apic_vcpu); vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if (!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; r = fx_init(vcpu); if (r) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); return 0; fail_free_wbinvd_dirty_mask: free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kvm_pmu_destroy(vcpu); kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); if (!irqchip_in_kernel(vcpu->kvm)) static_key_slow_dec(&kvm_no_apic_vcpu); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (type) return -EINVAL; INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); pvclock_update_vm_gtod_copy(kvm); return 0; } static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { int r; r = vcpu_load(vcpu); BUG_ON(r); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. 
*/ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); } void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { kvm_kvfree(free->arch.rmap[i]); free->arch.rmap[i] = NULL; } if (i == 0) continue; if (!dont || free->arch.lpage_info[i - 1] != dont->arch.lpage_info[i - 1]) { kvm_kvfree(free->arch.lpage_info[i - 1]); free->arch.lpage_info[i - 1] = NULL; } } } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { unsigned long ugfn; int lpages; int level = i + 1; lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; slot->arch.rmap[i] = kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i - 1])); if (!slot->arch.lpage_info[i - 1]) goto out_free; if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][0].write_count = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each * other, or if explicitly asked to, disable large page * support for this slot */ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || !kvm_largepages_enabled()) { unsigned long j; for (j = 0; j < lpages; ++j) slot->arch.lpage_info[i - 1][j].write_count = 1; } } return 0; out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { kvm_kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; kvm_kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, bool user_alloc) { int npages = memslot->npages; /* * Only private memory slots need to be mapped here since * KVM_SET_MEMORY_REGION ioctl is no longer supported. */ if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) { unsigned long userspace_addr; /* * MAP_SHARED to prevent internal slot pages from being moved * by fork()/COW. 
*/ userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, bool user_alloc) { int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT; if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) { int ret; ret = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); /* * Write protect all pages for dirty logging. * Existing largepage mappings are destroyed here and new ones will * not be created until the end of the logging. */ if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) kvm_mmu_slot_remove_write_access(kvm, mem->slot); /* * If memory slot is created, or moved, we need to clear all * mmio sptes. */ if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } } void kvm_arch_flush_shadow_all(struct kvm *kvm) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_arch_flush_shadow_all(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED || atomic_read(&vcpu->arch.nmi_queued) || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || is_error_page(work->page)) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu.direct_map && work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, 
gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (is_error_page(work->page)) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
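/*
 * Illustrative sketch (not part of the kernel source above): a minimal,
 * self-contained user-space model of the open-addressing scheme used by
 * kvm_add_async_pf_gfn() and kvm_del_async_pf_gfn(). All names here
 * (table, SLOTS, hash_fn, slot_of, ...) are assumptions chosen for the
 * demo, not KVM symbols. It shows the deletion invariant from the
 * "k lies cyclically in ]i,j]" comment: when a hole opens at slot i,
 * keep scanning while the next entry's home slot k lies cyclically in
 * ]i,j] (such an entry is still reachable past the hole), and pull an
 * entry back over the hole only once k falls outside that range.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 8u         /* power of two, like roundup_pow_of_two(ASYNC_PF_PER_VCPU) */
#define EMPTY UINT64_MAX /* plays the role of the ~0 sentinel */

static uint64_t table[SLOTS];

/* Deliberately collision-prone hash so the demo exercises probe chains. */
static uint32_t hash_fn(uint64_t key) { return (uint32_t)(key % 5) & (SLOTS - 1); }
static uint32_t next_probe(uint32_t k) { return (k + 1) & (SLOTS - 1); }

/* Analogue of kvm_async_pf_gfn_slot(): bounded linear probe for key. */
static uint32_t slot_of(uint64_t key)
{
    uint32_t k = hash_fn(key);
    unsigned n;

    for (n = 0; n < SLOTS && table[k] != key && table[k] != EMPTY; n++)
        k = next_probe(k);
    return k;
}

static void add_key(uint64_t key)
{
    uint32_t k = hash_fn(key);

    while (table[k] != EMPTY)
        k = next_probe(k);
    table[k] = key;
}

static int find_key(uint64_t key)
{
    return table[slot_of(key)] == key;
}

/* Analogue of kvm_del_async_pf_gfn(): delete without breaking probe chains. */
static void del_key(uint64_t key)
{
    uint32_t i, j, k;

    i = j = slot_of(key);
    while (1) {
        table[i] = EMPTY;
        do {
            j = next_probe(j);
            if (table[j] == EMPTY)
                return;              /* chain ends; the hole is harmless */
            k = hash_fn(table[j]);
            /* keep scanning while k lies cyclically in ]i,j] */
        } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
        table[i] = table[j];         /* entry j would be orphaned; move it back */
        i = j;                       /* repeat with the new hole at j */
    }
}

int main(void)
{
    uint32_t n;

    for (n = 0; n < SLOTS; n++)
        table[n] = EMPTY;
    add_key(0);  /* keys 0, 5 and 10 all hash to slot 0 with this toy hash */
    add_key(5);
    add_key(10);
    del_key(5);  /* remove the middle of the probe chain */
    assert(find_key(0) && find_key(10) && !find_key(5));
    puts("lookups survive deletion from the middle of a probe chain");
    return 0;
}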
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5593_0
crossvul-cpp_data_bad_4784_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT IIIII FFFFF FFFFF % % T I F F % % T I FFF FFF % % T I F F % % T IIIII F F % % % % % % Read/Write TIFF Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread_.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_TIFF_DELEGATE) # if defined(MAGICKCORE_HAVE_TIFFCONF_H) # include "tiffconf.h" # endif # include "tiff.h" # include "tiffio.h" # if !defined(COMPRESSION_ADOBE_DEFLATE) # define COMPRESSION_ADOBE_DEFLATE 8 # endif # if !defined(PREDICTOR_HORIZONTAL) # define PREDICTOR_HORIZONTAL 2 # endif # if !defined(TIFFTAG_COPYRIGHT) # define TIFFTAG_COPYRIGHT 33432 # endif # if !defined(TIFFTAG_OPIIMAGEID) # define TIFFTAG_OPIIMAGEID 32781 # endif #include "psd-private.h" /* Typedef declarations. 
*/ typedef enum { ReadSingleSampleMethod, ReadRGBAMethod, ReadCMYKAMethod, ReadYCCKMethod, ReadStripMethod, ReadTileMethod, ReadGenericMethod } TIFFMethodType; #if defined(MAGICKCORE_HAVE_TIFFREADEXIFDIRECTORY) typedef struct _ExifInfo { unsigned int tag, type, variable_length; const char *property; } ExifInfo; static const ExifInfo exif_info[] = { { EXIFTAG_EXPOSURETIME, TIFF_RATIONAL, 0, "exif:ExposureTime" }, { EXIFTAG_FNUMBER, TIFF_RATIONAL, 0, "exif:FNumber" }, { EXIFTAG_EXPOSUREPROGRAM, TIFF_SHORT, 0, "exif:ExposureProgram" }, { EXIFTAG_SPECTRALSENSITIVITY, TIFF_ASCII, 0, "exif:SpectralSensitivity" }, { EXIFTAG_ISOSPEEDRATINGS, TIFF_SHORT, 1, "exif:ISOSpeedRatings" }, { EXIFTAG_OECF, TIFF_NOTYPE, 0, "exif:OptoelectricConversionFactor" }, { EXIFTAG_EXIFVERSION, TIFF_NOTYPE, 0, "exif:ExifVersion" }, { EXIFTAG_DATETIMEORIGINAL, TIFF_ASCII, 0, "exif:DateTimeOriginal" }, { EXIFTAG_DATETIMEDIGITIZED, TIFF_ASCII, 0, "exif:DateTimeDigitized" }, { EXIFTAG_COMPONENTSCONFIGURATION, TIFF_NOTYPE, 0, "exif:ComponentsConfiguration" }, { EXIFTAG_COMPRESSEDBITSPERPIXEL, TIFF_RATIONAL, 0, "exif:CompressedBitsPerPixel" }, { EXIFTAG_SHUTTERSPEEDVALUE, TIFF_SRATIONAL, 0, "exif:ShutterSpeedValue" }, { EXIFTAG_APERTUREVALUE, TIFF_RATIONAL, 0, "exif:ApertureValue" }, { EXIFTAG_BRIGHTNESSVALUE, TIFF_SRATIONAL, 0, "exif:BrightnessValue" }, { EXIFTAG_EXPOSUREBIASVALUE, TIFF_SRATIONAL, 0, "exif:ExposureBiasValue" }, { EXIFTAG_MAXAPERTUREVALUE, TIFF_RATIONAL, 0, "exif:MaxApertureValue" }, { EXIFTAG_SUBJECTDISTANCE, TIFF_RATIONAL, 0, "exif:SubjectDistance" }, { EXIFTAG_METERINGMODE, TIFF_SHORT, 0, "exif:MeteringMode" }, { EXIFTAG_LIGHTSOURCE, TIFF_SHORT, 0, "exif:LightSource" }, { EXIFTAG_FLASH, TIFF_SHORT, 0, "exif:Flash" }, { EXIFTAG_FOCALLENGTH, TIFF_RATIONAL, 0, "exif:FocalLength" }, { EXIFTAG_SUBJECTAREA, TIFF_NOTYPE, 0, "exif:SubjectArea" }, { EXIFTAG_MAKERNOTE, TIFF_NOTYPE, 0, "exif:MakerNote" }, { EXIFTAG_USERCOMMENT, TIFF_NOTYPE, 0, "exif:UserComment" }, { EXIFTAG_SUBSECTIME, TIFF_ASCII, 0, "exif:SubSecTime" }, { EXIFTAG_SUBSECTIMEORIGINAL, TIFF_ASCII, 0, "exif:SubSecTimeOriginal" }, { EXIFTAG_SUBSECTIMEDIGITIZED, TIFF_ASCII, 0, "exif:SubSecTimeDigitized" }, { EXIFTAG_FLASHPIXVERSION, TIFF_NOTYPE, 0, "exif:FlashpixVersion" }, { EXIFTAG_PIXELXDIMENSION, TIFF_LONG, 0, "exif:PixelXDimension" }, { EXIFTAG_PIXELYDIMENSION, TIFF_LONG, 0, "exif:PixelYDimension" }, { EXIFTAG_RELATEDSOUNDFILE, TIFF_ASCII, 0, "exif:RelatedSoundFile" }, { EXIFTAG_FLASHENERGY, TIFF_RATIONAL, 0, "exif:FlashEnergy" }, { EXIFTAG_SPATIALFREQUENCYRESPONSE, TIFF_NOTYPE, 0, "exif:SpatialFrequencyResponse" }, { EXIFTAG_FOCALPLANEXRESOLUTION, TIFF_RATIONAL, 0, "exif:FocalPlaneXResolution" }, { EXIFTAG_FOCALPLANEYRESOLUTION, TIFF_RATIONAL, 0, "exif:FocalPlaneYResolution" }, { EXIFTAG_FOCALPLANERESOLUTIONUNIT, TIFF_SHORT, 0, "exif:FocalPlaneResolutionUnit" }, { EXIFTAG_SUBJECTLOCATION, TIFF_SHORT, 0, "exif:SubjectLocation" }, { EXIFTAG_EXPOSUREINDEX, TIFF_RATIONAL, 0, "exif:ExposureIndex" }, { EXIFTAG_SENSINGMETHOD, TIFF_SHORT, 0, "exif:SensingMethod" }, { EXIFTAG_FILESOURCE, TIFF_NOTYPE, 0, "exif:FileSource" }, { EXIFTAG_SCENETYPE, TIFF_NOTYPE, 0, "exif:SceneType" }, { EXIFTAG_CFAPATTERN, TIFF_NOTYPE, 0, "exif:CFAPattern" }, { EXIFTAG_CUSTOMRENDERED, TIFF_SHORT, 0, "exif:CustomRendered" }, { EXIFTAG_EXPOSUREMODE, TIFF_SHORT, 0, "exif:ExposureMode" }, { EXIFTAG_WHITEBALANCE, TIFF_SHORT, 0, "exif:WhiteBalance" }, { EXIFTAG_DIGITALZOOMRATIO, TIFF_RATIONAL, 0, "exif:DigitalZoomRatio" }, { EXIFTAG_FOCALLENGTHIN35MMFILM, TIFF_SHORT, 0, 
"exif:FocalLengthIn35mmFilm" }, { EXIFTAG_SCENECAPTURETYPE, TIFF_SHORT, 0, "exif:SceneCaptureType" }, { EXIFTAG_GAINCONTROL, TIFF_RATIONAL, 0, "exif:GainControl" }, { EXIFTAG_CONTRAST, TIFF_SHORT, 0, "exif:Contrast" }, { EXIFTAG_SATURATION, TIFF_SHORT, 0, "exif:Saturation" }, { EXIFTAG_SHARPNESS, TIFF_SHORT, 0, "exif:Sharpness" }, { EXIFTAG_DEVICESETTINGDESCRIPTION, TIFF_NOTYPE, 0, "exif:DeviceSettingDescription" }, { EXIFTAG_SUBJECTDISTANCERANGE, TIFF_SHORT, 0, "exif:SubjectDistanceRange" }, { EXIFTAG_IMAGEUNIQUEID, TIFF_ASCII, 0, "exif:ImageUniqueID" }, { 0, 0, 0, (char *) NULL } }; #endif #endif /* MAGICKCORE_TIFF_DELEGATE */ /* Global declarations. */ static MagickThreadKey tiff_exception; static SemaphoreInfo *tiff_semaphore = (SemaphoreInfo *) NULL; static TIFFErrorHandler error_handler, warning_handler; static volatile MagickBooleanType instantiate_key = MagickFalse; /* Forward declarations. */ #if defined(MAGICKCORE_TIFF_DELEGATE) static Image * ReadTIFFImage(const ImageInfo *,ExceptionInfo *); static MagickBooleanType WriteGROUP4Image(const ImageInfo *,Image *), WritePTIFImage(const ImageInfo *,Image *), WriteTIFFImage(const ImageInfo *,Image *); #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTIFF() returns MagickTrue if the image format type, identified by the % magick string, is TIFF. % % The format of the IsTIFF method is: % % MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsTIFF(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"\115\115\000\052",4) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\052\000",4) == 0) return(MagickTrue); #if defined(TIFF_VERSION_BIG) if (length < 8) return(MagickFalse); if (memcmp(magick,"\115\115\000\053\000\010\000\000",8) == 0) return(MagickTrue); if (memcmp(magick,"\111\111\053\000\010\000\000\000",8) == 0) return(MagickTrue); #endif return(MagickFalse); } #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d G R O U P 4 I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadGROUP4Image() reads a raw CCITT Group 4 image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadGROUP4Image method is: % % Image *ReadGROUP4Image(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static inline size_t WriteLSBLong(FILE *file,const size_t value) { unsigned char buffer[4]; buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); return(fwrite(buffer,1,4,file)); } static Image *ReadGROUP4Image(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MaxTextExtent]; FILE *file; Image *image; ImageInfo *read_info; int c, unique_file; MagickBooleanType status; size_t length; ssize_t offset, strip_offset; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Write raw CCITT Group 4 wrapped as a TIFF image file. */ file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile"); length=fwrite("\111\111\052\000\010\000\000\000\016\000",1,10,file); length=fwrite("\376\000\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\000\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,image->columns); length=fwrite("\001\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,image->rows); length=fwrite("\002\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\003\001\003\000\001\000\000\000\004\000\000\000",1,12,file); length=fwrite("\006\001\003\000\001\000\000\000\000\000\000\000",1,12,file); length=fwrite("\021\001\003\000\001\000\000\000",1,8,file); strip_offset=10+(12*14)+4+8; length=WriteLSBLong(file,(size_t) strip_offset); length=fwrite("\022\001\003\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(size_t) image_info->orientation); length=fwrite("\025\001\003\000\001\000\000\000\001\000\000\000",1,12,file); length=fwrite("\026\001\004\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,image->rows); length=fwrite("\027\001\004\000\001\000\000\000\000\000\000\000",1,12,file); offset=(ssize_t) ftell(file)-4; length=fwrite("\032\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(size_t) (strip_offset-8)); length=fwrite("\033\001\005\000\001\000\000\000",1,8,file); length=WriteLSBLong(file,(size_t) (strip_offset-8)); length=fwrite("\050\001\003\000\001\000\000\000\002\000\000\000",1,12,file); length=fwrite("\000\000\000\000",1,4,file); length=WriteLSBLong(file,(size_t) (image->x_resolution+0.5)); length=WriteLSBLong(file,1); status=MagickTrue; for (length=0; (c=ReadBlobByte(image)) != EOF; length++) if (fputc(c,file) != c) status=MagickFalse; offset=(ssize_t) fseek(file,(ssize_t) offset,SEEK_SET); length=WriteLSBLong(file,(unsigned int) length); (void) fclose(file); (void) CloseBlob(image); image=DestroyImage(image); /* Read TIFF image. 
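The loop above wrapped the raw Group 4 stream in a minimal little-endian TIFF: an 8-byte header, a 14-entry IFD describing a single strip, the resolution rational, and the strip data itself at strip_offset. The strip byte count recorded at `offset' is patched in once the copy loop has measured the stream, and the temporary file is then handed to the regular TIFF decoder.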
*/ read_info=CloneImageInfo((ImageInfo *) NULL); (void) FormatLocaleString(read_info->filename,MaxTextExtent,"%s",filename); image=ReadTIFFImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) { (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,"GROUP4",MaxTextExtent); } (void) RelinquishUniqueFileResource(filename); if (status == MagickFalse) image=DestroyImage(image); return(image); } #endif #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadTIFFImage() reads a Tagged image file and returns it. It allocates the % memory necessary for the new Image structure and returns a pointer to the % new image. % % The format of the ReadTIFFImage method is: % % Image *ReadTIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static inline unsigned char ClampYCC(double value) { value=255.0-value; if (value < 0.0) return((unsigned char)0); if (value > 255.0) return((unsigned char)255); return((unsigned char)(value)); } static MagickBooleanType DecodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(q)+0.5; if (a > 1.0) a-=1.0; b=QuantumScale*GetPixelb(q)+0.5; if (b > 1.0) b-=1.0; SetPixela(q,QuantumRange*a); SetPixelb(q,QuantumRange*b); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType ReadProfile(Image *image,const char *name, const unsigned char *datum,ssize_t length) { MagickBooleanType status; StringInfo *profile; if (length < 4) return(MagickFalse); profile=BlobToStringInfo(datum,(size_t) length); if (profile == (StringInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=SetImageProfile(image,name,profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); return(MagickTrue); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int TIFFCloseBlob(thandle_t image) { (void) CloseBlob((Image *) image); return(0); } static void TIFFErrors(const char *module,const char *format,va_list error) { char message[MaxTextExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MaxTextExtent,format,error); #else (void) vsprintf(message,format,error); #endif (void) ConcatenateMagickString(message,".",MaxTextExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) 
(void) ThrowMagickException(exception,GetMagickModule(),CoderError,message, "`%s'",module); } static toff_t TIFFGetBlobSize(thandle_t image) { return((toff_t) GetBlobSize((Image *) image)); } static void TIFFGetProfiles(TIFF *tiff,Image *image,MagickBooleanType ping) { uint32 length; unsigned char *profile; length=0; if (ping == MagickFalse) { #if defined(TIFFTAG_ICCPROFILE) if ((TIFFGetField(tiff,TIFFTAG_ICCPROFILE,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) (void) ReadProfile(image,"icc",profile,(ssize_t) length); #endif #if defined(TIFFTAG_PHOTOSHOP) if ((TIFFGetField(tiff,TIFFTAG_PHOTOSHOP,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) (void) ReadProfile(image,"8bim",profile,(ssize_t) length); #endif #if defined(TIFFTAG_RICHTIFFIPTC) if ((TIFFGetField(tiff,TIFFTAG_RICHTIFFIPTC,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) { if (TIFFIsByteSwapped(tiff) != 0) TIFFSwabArrayOfLong((uint32 *) profile,(size_t) length); (void) ReadProfile(image,"iptc",profile,4L*length); } #endif #if defined(TIFFTAG_XMLPACKET) if ((TIFFGetField(tiff,TIFFTAG_XMLPACKET,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) (void) ReadProfile(image,"xmp",profile,(ssize_t) length); #endif if ((TIFFGetField(tiff,34118,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) (void) ReadProfile(image,"tiff:34118",profile,(ssize_t) length); } if ((TIFFGetField(tiff,37724,&length,&profile) == 1) && (profile != (unsigned char *) NULL)) (void) ReadProfile(image,"tiff:37724",profile,(ssize_t) length); } static void TIFFGetProperties(TIFF *tiff,Image *image) { char message[MaxTextExtent], *text; uint32 count, length, type; unsigned long *tietz; if (TIFFGetField(tiff,TIFFTAG_ARTIST,&text) == 1) (void) SetImageProperty(image,"tiff:artist",text); if (TIFFGetField(tiff,TIFFTAG_COPYRIGHT,&text) == 1) (void) SetImageProperty(image,"tiff:copyright",text); if (TIFFGetField(tiff,TIFFTAG_DATETIME,&text) == 1) (void) SetImageProperty(image,"tiff:timestamp",text); if (TIFFGetField(tiff,TIFFTAG_DOCUMENTNAME,&text) == 1) (void) SetImageProperty(image,"tiff:document",text); if (TIFFGetField(tiff,TIFFTAG_HOSTCOMPUTER,&text) == 1) (void) SetImageProperty(image,"tiff:hostcomputer",text); if (TIFFGetField(tiff,TIFFTAG_IMAGEDESCRIPTION,&text) == 1) (void) SetImageProperty(image,"comment",text); if (TIFFGetField(tiff,TIFFTAG_MAKE,&text) == 1) (void) SetImageProperty(image,"tiff:make",text); if (TIFFGetField(tiff,TIFFTAG_MODEL,&text) == 1) (void) SetImageProperty(image,"tiff:model",text); if (TIFFGetField(tiff,TIFFTAG_OPIIMAGEID,&count,&text) == 1) { if (count >= MaxTextExtent) count=MaxTextExtent-1; (void) CopyMagickString(message,text,count+1); (void) SetImageProperty(image,"tiff:image-id",message); } if (TIFFGetField(tiff,TIFFTAG_PAGENAME,&text) == 1) (void) SetImageProperty(image,"label",text); if (TIFFGetField(tiff,TIFFTAG_SOFTWARE,&text) == 1) (void) SetImageProperty(image,"tiff:software",text); if (TIFFGetField(tiff,33423,&count,&text) == 1) { if (count >= MaxTextExtent) count=MaxTextExtent-1; (void) CopyMagickString(message,text,count+1); (void) SetImageProperty(image,"tiff:kodak-33423",message); } if (TIFFGetField(tiff,36867,&count,&text) == 1) { if (count >= MaxTextExtent) count=MaxTextExtent-1; (void) CopyMagickString(message,text,count+1); (void) SetImageProperty(image,"tiff:kodak-36867",message); } if (TIFFGetField(tiff,TIFFTAG_SUBFILETYPE,&type) == 1) switch (type) { case 0x01: { (void) SetImageProperty(image,"tiff:subfiletype","REDUCEDIMAGE"); break; } case 
0x02: { (void) SetImageProperty(image,"tiff:subfiletype","PAGE"); break; } case 0x04: { (void) SetImageProperty(image,"tiff:subfiletype","MASK"); break; } default: break; } if (TIFFGetField(tiff,37706,&length,&tietz) == 1) { (void) FormatLocaleString(message,MaxTextExtent,"%lu",tietz[0]); (void) SetImageProperty(image,"tiff:tietz_offset",message); } } static void TIFFGetEXIFProperties(TIFF *tiff,Image *image) { #if defined(MAGICKCORE_HAVE_TIFFREADEXIFDIRECTORY) char value[MaxTextExtent]; register ssize_t i; tdir_t directory; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif offset; void *sans; /* Read EXIF properties. */ offset=0; if (TIFFGetField(tiff,TIFFTAG_EXIFIFD,&offset) != 1) return; directory=TIFFCurrentDirectory(tiff); if (TIFFReadEXIFDirectory(tiff,offset) != 1) { TIFFSetDirectory(tiff,directory); return; } sans=NULL; for (i=0; exif_info[i].tag != 0; i++) { *value='\0'; switch (exif_info[i].type) { case TIFF_ASCII: { char *ascii; ascii=(char *) NULL; if ((TIFFGetField(tiff,exif_info[i].tag,&ascii,&sans,&sans) == 1) && (ascii != (char *) NULL) && (*ascii != '\0')) (void) CopyMagickString(value,ascii,MaxTextExtent); break; } case TIFF_SHORT: { if (exif_info[i].variable_length == 0) { uint16 shorty; shorty=0; if (TIFFGetField(tiff,exif_info[i].tag,&shorty,&sans,&sans) == 1) (void) FormatLocaleString(value,MaxTextExtent,"%d",shorty); } else { int tiff_status; uint16 *shorty; uint16 shorty_num; tiff_status=TIFFGetField(tiff,exif_info[i].tag,&shorty_num,&shorty, &sans,&sans); if (tiff_status == 1) (void) FormatLocaleString(value,MaxTextExtent,"%d", shorty_num != 0 ? shorty[0] : 0); } break; } case TIFF_LONG: { uint32 longy; longy=0; if (TIFFGetField(tiff,exif_info[i].tag,&longy,&sans,&sans) == 1) (void) FormatLocaleString(value,MaxTextExtent,"%d",longy); break; } #if defined(TIFF_VERSION_BIG) case TIFF_LONG8: { uint64 long8y; long8y=0; if (TIFFGetField(tiff,exif_info[i].tag,&long8y,&sans,&sans) == 1) (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) ((MagickOffsetType) long8y)); break; } #endif case TIFF_RATIONAL: case TIFF_SRATIONAL: case TIFF_FLOAT: { float floaty; floaty=0.0; if (TIFFGetField(tiff,exif_info[i].tag,&floaty,&sans,&sans) == 1) (void) FormatLocaleString(value,MaxTextExtent,"%g",(double) floaty); break; } case TIFF_DOUBLE: { double doubley; doubley=0.0; if (TIFFGetField(tiff,exif_info[i].tag,&doubley,&sans,&sans) == 1) (void) FormatLocaleString(value,MaxTextExtent,"%g",doubley); break; } default: break; } if (*value != '\0') (void) SetImageProperty(image,exif_info[i].property,value); } TIFFSetDirectory(tiff,directory); #else (void) tiff; (void) image; #endif } static int TIFFMapBlob(thandle_t image,tdata_t *base,toff_t *size) { *base=(tdata_t *) GetBlobStreamData((Image *) image); if (*base != (tdata_t *) NULL) *size=(toff_t) GetBlobSize((Image *) image); if (*base != (tdata_t *) NULL) return(1); return(0); } static tsize_t TIFFReadBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) ReadBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static int32 TIFFReadPixels(TIFF *tiff,size_t bits_per_sample, tsample_t sample,ssize_t row,tdata_t scanline) { int32 status; (void) bits_per_sample; status=TIFFReadScanline(tiff,scanline,(uint32) row,sample); return(status); } static toff_t TIFFSeekBlob(thandle_t image,toff_t offset,int whence) { return((toff_t) SeekBlob((Image *) image,(MagickOffsetType) offset,whence)); } static void TIFFUnmapBlob(thandle_t image,tdata_t base,toff_t size) { (void) image; (void) 
base; (void) size; } static void TIFFWarnings(const char *module,const char *format,va_list warning) { char message[MaxTextExtent]; ExceptionInfo *exception; #if defined(MAGICKCORE_HAVE_VSNPRINTF) (void) vsnprintf(message,MaxTextExtent,format,warning); #else (void) vsprintf(message,format,warning); #endif (void) ConcatenateMagickString(message,".",MaxTextExtent); exception=(ExceptionInfo *) GetMagickThreadValue(tiff_exception); if (exception != (ExceptionInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),CoderWarning, message,"`%s'",module); } static tsize_t TIFFWriteBlob(thandle_t image,tdata_t data,tsize_t size) { tsize_t count; count=(tsize_t) WriteBlob((Image *) image,(size_t) size, (unsigned char *) data); return(count); } static TIFFMethodType GetJPEGMethod(Image* image,TIFF *tiff,uint16 photometric, uint16 bits_per_sample,uint16 samples_per_pixel) { #define BUFFER_SIZE 2048 MagickOffsetType position, offset; register size_t i; TIFFMethodType method; #if defined(TIFF_VERSION_BIG) uint64 #else uint32 #endif **value; unsigned char buffer[BUFFER_SIZE+32]; unsigned short length; /* only support 8 bit for now */ if ((photometric != PHOTOMETRIC_SEPARATED) || (bits_per_sample != 8) || (samples_per_pixel != 4)) return(ReadGenericMethod); /* Search for Adobe APP14 JPEG Marker */ if (!TIFFGetField(tiff,TIFFTAG_STRIPOFFSETS,&value)) return(ReadRGBAMethod); position=TellBlob(image); offset=(MagickOffsetType) (value[0]); if (SeekBlob(image,offset,SEEK_SET) != offset) return(ReadRGBAMethod); method=ReadRGBAMethod; if (ReadBlob(image,BUFFER_SIZE,buffer) == BUFFER_SIZE) { for (i=0; i < BUFFER_SIZE; i++) { while (i < BUFFER_SIZE) { if (buffer[i++] == 255) break; } while (i < BUFFER_SIZE) { if (buffer[++i] != 255) break; } if (buffer[i++] == 216) /* JPEG_MARKER_SOI */ continue; length=(unsigned short) (((unsigned int) (buffer[i] << 8) | (unsigned int) buffer[i+1]) & 0xffff); if (i+(size_t) length >= BUFFER_SIZE) break; if (buffer[i-1] == 238) /* JPEG_MARKER_APP0+14 */ { if (length != 14) break; /* 0 == CMYK, 1 == YCbCr, 2 = YCCK */ if (buffer[i+13] == 2) method=ReadYCCKMethod; break; } i+=(size_t) length; } } (void) SeekBlob(image,position,SEEK_SET); return(method); } static void TIFFReadPhotoshopLayers(Image* image,const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; const StringInfo *layer_info; Image *layers; PSDInfo info; register ssize_t i; if (GetImageListLength(image) != 1) return; if ((image_info->number_scenes == 1) && (image_info->scene == 0)) return; option=GetImageOption(image_info,"tiff:ignore-layers"); if (option != (const char * ) NULL) return; layer_info=GetImageProfile(image,"tiff:37724"); if (layer_info == (const StringInfo *) NULL) return; for (i=0; i < (ssize_t) layer_info->length-8; i++) { if (LocaleNCompare((const char *) (layer_info->datum+i), image->endian == MSBEndian ? "8BIM" : "MIB8",4) != 0) continue; i+=4; if ((LocaleNCompare((const char *) (layer_info->datum+i), image->endian == MSBEndian ? "Layr" : "ryaL",4) == 0) || (LocaleNCompare((const char *) (layer_info->datum+i), image->endian == MSBEndian ? "LMsk" : "ksML",4) == 0) || (LocaleNCompare((const char *) (layer_info->datum+i), image->endian == MSBEndian ? "Lr16" : "61rL",4) == 0) || (LocaleNCompare((const char *) (layer_info->datum+i), image->endian == MSBEndian ? 
"Lr32" : "23rL",4) == 0)) break; } i+=4; if (i >= (ssize_t) (layer_info->length-8)) return; layers=CloneImage(image,image->columns,image->rows,MagickTrue,exception); (void) DeleteImageProfile(layers,"tiff:37724"); AttachBlob(layers->blob,layer_info->datum,layer_info->length); SeekBlob(layers,(MagickOffsetType) i,SEEK_SET); info.version=1; info.columns=layers->columns; info.rows=layers->rows; /* Setting the mode to a value that won't change the colorspace */ info.mode=10; if (IsGrayImage(image,&image->exception) != MagickFalse) info.channels=(image->matte != MagickFalse ? 2UL : 1UL); else if (image->storage_class == PseudoClass) info.channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->colorspace != CMYKColorspace) info.channels=(image->matte != MagickFalse ? 4UL : 3UL); else info.channels=(image->matte != MagickFalse ? 5UL : 4UL); } (void) ReadPSDLayers(layers,image_info,&info,MagickFalse,exception); InheritException(exception,&layers->exception); DeleteImageFromList(&layers); if (layers != (Image *) NULL) { SetImageArtifact(image,"tiff:has-layers","true"); AppendImageToList(&image,layers); while (layers != (Image *) NULL) { SetImageArtifact(layers,"tiff:has-layers","true"); DetachBlob(layers->blob); layers=GetNextImageInList(layers); } } } #if defined(__cplusplus) || defined(c_plusplus) } #endif static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType status; MagickSizeType number_pixels; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t pad; ssize_t y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. */ if (image_info->scene < (size_t)TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? 
MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } do { DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) TIFFPrintDirectory(tiff,stdout,MagickFalse); RestoreMSCWarning if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point"); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black"); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white"); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette"); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB"); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB"); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)"); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV"); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK"); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated"); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR"); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown"); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: %u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric")); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb"); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb"); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) SetImageColorspace(image,GRAYColorspace); if (photometric == PHOTOMETRIC_SEPARATED) SetImageColorspace(image,CMYKColorspace); if (photometric == PHOTOMETRIC_CIELAB) SetImageColorspace(image,LabColorspace); TIFFGetProfiles(tiff,image,image_info->ping); TIFFGetProperties(tiff,image); option=GetImageOption(image_info,"tiff:exif-properties"); if ((option == (const char *) NULL) || (IsMagickTrue(option) != MagickFalse)) TIFFGetEXIFProperties(tiff,image); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution) == 1)) { image->x_resolution=x_resolution; image->y_resolution=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position) == 1)) { image->page.x=(ssize_t) ceil(x_position*image->x_resolution-0.5); image->page.y=(ssize_t) ceil(y_position*image->y_resolution-0.5); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MaxTextExtent]; int tiff_status; uint16 horizontal, vertical; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_YCBCRSUBSAMPLING, &horizontal,&vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if 
defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; default: image->compression=RLECompression; break; } /* Allocate memory for the image and pixel buffer. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { TIFFClose(tiff); quantum_info=DestroyQuantumInfo(quantum_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified"); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->matte=MagickTrue; } else for (i=0; i < extra_samples; i++) { image->matte=MagickTrue; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated"); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) (void) SetImageProperty(image,"tiff:alpha","unassociated"); } } if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages) == 1) image->scene=value; if (image->storage_class == PseudoClass) { int tiff_status; size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. 
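Some writers store 8-bit values (0..255) in the 16-bit COLORMAP tag instead of scaling them to 0..65535, so the loop below sniffs for any component >= 256 and picks the divisor (255 or 65535) accordingly; an old-style entry of 255 then still maps to the full QuantumRange.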
*/ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } if (image->matte == MagickFalse) image->depth=GetImageDepth(image,exception); } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { quantum_info=DestroyQuantumInfo(quantum_info); break; } goto next_tiff_frame; } method=ReadGenericMethod; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char value[MaxTextExtent]; method=ReadStripMethod; (void) FormatLocaleString(value,MaxTextExtent,"%u",(unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",value); } if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_CONTIG)) method=ReadRGBAMethod; if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_SEPARATE)) method=ReadCMYKAMethod; if ((photometric != PHOTOMETRIC_RGB) && (photometric != PHOTOMETRIC_CIELAB) && (photometric != PHOTOMETRIC_SEPARATED)) method=ReadGenericMethod; if (image->storage_class == PseudoClass) method=ReadSingleSampleMethod; if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) method=ReadSingleSampleMethod; if ((photometric != PHOTOMETRIC_SEPARATED) && (interlace == PLANARCONFIG_SEPARATE) && (bits_per_sample < 64)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); if (compress_tag == COMPRESSION_JBIG) method=ReadStripMethod; if (TIFFIsTiled(tiff) != MagickFalse) method=ReadTileMethod; quantum_info->endian=LSBEndian; quantum_type=RGBQuantum; pixels=GetQuantumPixels(quantum_info); switch (method) { case ReadSingleSampleMethod: { /* Convert TIFF image to PseudoClass MIFF image. */ quantum_type=IndexQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); if (image->matte != MagickFalse) { if (image->storage_class != PseudoClass) { quantum_type=samples_per_pixel == 1 ? 
AlphaQuantum : GrayAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } else { quantum_type=IndexAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } } else if (image->storage_class != PseudoClass) { quantum_type=GrayQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadRGBAMethod: { /* Convert TIFF image to DirectClass MIFF image. */ pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); quantum_type=RGBQuantum; if (image->matte != MagickFalse) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); quantum_type=CMYKQuantum; if (image->matte != MagickFalse) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadCMYKAMethod: { /* Convert TIFF image to DirectClass MIFF image. 
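Planar (PLANARCONFIG_SEPARATE) files store each sample in its own plane, so the outer loop below walks the planes and maps the plane index to a single-channel quantum type (red/green/blue/alpha, or cyan/magenta/yellow/black/alpha for CMYK) before importing each row.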
*/ for (i=0; i < (ssize_t) samples_per_pixel; i++) { for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; int status; status=TIFFReadPixels(tiff,bits_per_sample,(tsample_t) i,y,(char *) pixels); if (status == -1) break; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (image->colorspace != CMYKColorspace) switch (i) { case 0: quantum_type=RedQuantum; break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } else switch (i) { case 0: quantum_type=CyanQuantum; break; case 1: quantum_type=MagentaQuantum; break; case 2: quantum_type=YellowQuantum; break; case 3: quantum_type=BlackQuantum; break; case 4: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadYCCKMethod: { pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register IndexPacket *indexes; register PixelPacket *magick_restrict q; register ssize_t x; unsigned char *p; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456))); SetPixelMagenta(q,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984))); SetPixelYellow(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816))); SetPixelBlack(indexes+x,ScaleCharToQuantum((unsigned char)*(p+3))); q++; p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { register uint32 *p; /* Convert stripped TIFF image to DirectClass MIFF image. 
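TIFFReadRGBAStrip() returns the rows of a strip bottom-up, so `i' counts down from the number of rows actually present in the current strip and `p' is repositioned to the matching raster row before each scanline's RGBA samples are unpacked.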
*/ i=0; p=(uint32 *) NULL; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (i == 0) { if (TIFFReadRGBAStrip(tiff,(tstrip_t) y,(uint32 *) pixels) == 0) break; i=(ssize_t) MagickMin((ssize_t) rows_per_strip,(ssize_t) image->rows-y); } i--; p=((uint32 *) pixels)+image->columns*i; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) (TIFFGetR(*p)))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) (TIFFGetG(*p)))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) (TIFFGetB(*p)))); if (image->matte != MagickFalse) SetPixelOpacity(q,ScaleCharToQuantum((unsigned char) (TIFFGetA(*p)))); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadTileMethod: { register uint32 *p; uint32 *tile_pixels, columns, rows; /* Convert tiled TIFF image to DirectClass MIFF image. */ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) { TIFFClose(tiff); ThrowReaderException(CoderError,"ImageIsNotTiled"); } (void) SetImageStorageClass(image,DirectClass); number_pixels=(MagickSizeType) columns*rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } tile_pixels=(uint32 *) AcquireQuantumMemory(number_pixels, sizeof(*tile_pixels)); if (tile_pixels == (uint32 *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } for (y=0; y < (ssize_t) image->rows; y+=rows) { PixelPacket *tile; register ssize_t x; register PixelPacket *magick_restrict q; size_t columns_remaining, rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; tile=QueueAuthenticPixels(image,0,y,image->columns,rows_remaining, exception); if (tile == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t column, row; if (TIFFReadRGBATile(tiff,(uint32) x,(uint32) y,tile_pixels) == 0) break; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; p=tile_pixels+(rows-rows_remaining)*columns; q=tile+(image->columns*(rows_remaining-1)+x); for (row=rows_remaining; row > 0; row--) { if (image->matte != MagickFalse) for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); q++; p++; } else for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); q++; p++; } p+=columns-columns_remaining; q-=(image->columns+columns_remaining); } } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) 
break; } } tile_pixels=(uint32 *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *pixel_info; register uint32 *p; uint32 *pixels; /* Convert TIFF image to DirectClass MIFF image. */ number_pixels=(MagickSizeType) image->columns*image->rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(uint32 *) GetVirtualMemoryBlob(pixel_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); /* Convert image to DirectClass pixel packets. */ p=pixels+number_pixels-1; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; q+=image->columns-1; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); p--; q--; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } pixel_info=RelinquishVirtualMemory(pixel_info); break; } } SetQuantumImageType(image,quantum_type); next_tiff_frame: quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (status != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while (status != MagickFalse); TIFFClose(tiff); TIFFReadPhotoshopLayers(image,image_info,exception); if (image_info->number_scenes != 0) { if (image_info->scene >= GetImageListLength(image)) { /* Subimage was not found in the Photoshop layer */ image = DestroyImageList(image); return((Image *)NULL); } } return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterTIFFImage() adds properties for the TIFF image format to % the list of supported formats. 
The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterTIFFImage method is: % % size_t RegisterTIFFImage(void) % */ #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) static TIFFExtendProc tag_extender = (TIFFExtendProc) NULL; static void TIFFIgnoreTags(TIFF *tiff) { char *q; const char *p, *tags; Image *image; register ssize_t i; size_t count; TIFFFieldInfo *ignore; if (TIFFGetReadProc(tiff) != TIFFReadBlob) return; image=(Image *)TIFFClientdata(tiff); tags=GetImageArtifact(image,"tiff:ignore-tags"); if (tags == (const char *) NULL) return; count=0; p=tags; while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; (void) strtol(p,&q,10); if (p == q) return; p=q; count++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } if (count == 0) return; i=0; p=tags; ignore=(TIFFFieldInfo *) AcquireQuantumMemory(count,sizeof(*ignore)); /* This also sets field_bit to 0 (FIELD_IGNORE) */ ResetMagickMemory(ignore,0,count*sizeof(*ignore)); while (*p != '\0') { while ((isspace((int) ((unsigned char) *p)) != 0)) p++; ignore[i].field_tag=(ttag_t) strtol(p,&q,10); p=q; i++; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; } (void) TIFFMergeFieldInfo(tiff,ignore,(uint32) count); ignore=(TIFFFieldInfo *) RelinquishMagickMemory(ignore); } static void TIFFTagExtender(TIFF *tiff) { static const TIFFFieldInfo TIFFExtensions[] = { { 37724, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "PhotoshopLayerData" }, { 34118, -3, -3, TIFF_UNDEFINED, FIELD_CUSTOM, 1, 1, (char *) "Microscope" } }; TIFFMergeFieldInfo(tiff,TIFFExtensions,sizeof(TIFFExtensions)/ sizeof(*TIFFExtensions)); if (tag_extender != (TIFFExtendProc) NULL) (*tag_extender)(tiff); TIFFIgnoreTags(tiff); } #endif ModuleExport size_t RegisterTIFFImage(void) { #define TIFFDescription "Tagged Image File Format" char version[MaxTextExtent]; MagickInfo *entry; if (tiff_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&tiff_semaphore); LockSemaphoreInfo(tiff_semaphore); if (instantiate_key == MagickFalse) { if (CreateMagickThreadKey(&tiff_exception,NULL) == MagickFalse) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); error_handler=TIFFSetErrorHandler(TIFFErrors); warning_handler=TIFFSetWarningHandler(TIFFWarnings); #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) if (tag_extender == (TIFFExtendProc) NULL) tag_extender=TIFFSetTagExtender(TIFFTagExtender); #endif instantiate_key=MagickTrue; } UnlockSemaphoreInfo(tiff_semaphore); *version='\0'; #if defined(TIFF_VERSION) (void) FormatLocaleString(version,MaxTextExtent,"%d",TIFF_VERSION); #endif #if defined(MAGICKCORE_TIFF_DELEGATE) { const char *p; register ssize_t i; p=TIFFGetVersion(); for (i=0; (i < (MaxTextExtent-1)) && (*p != 0) && (*p != '\n'); i++) version[i]=(*p++); version[i]='\0'; } #endif entry=SetMagickInfo("GROUP4"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadGROUP4Image; entry->encoder=(EncodeImageHandler *) WriteGROUP4Image; #endif entry->raw=MagickTrue; entry->endian_support=MagickTrue; entry->adjoin=MagickFalse; entry->format_type=ImplicitFormatType; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Raw CCITT 
Group4"); entry->mime_type=ConstantString("image/tiff"); entry->module=ConstantString("TIFF"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PTIF"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WritePTIFImage; #endif entry->endian_support=MagickTrue; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Pyramid encoded TIFF"); entry->mime_type=ConstantString("image/tiff"); entry->module=ConstantString("TIFF"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("TIF"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->endian_support=MagickTrue; entry->seekable_stream=MagickTrue; entry->stealth=MagickTrue; entry->description=ConstantString(TIFFDescription); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); entry->module=ConstantString("TIFF"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("TIFF"); #if defined(MAGICKCORE_TIFF_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->magick=(IsImageFormatHandler *) IsTIFF; entry->endian_support=MagickTrue; entry->seekable_stream=MagickTrue; entry->description=ConstantString(TIFFDescription); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); entry->module=ConstantString("TIFF"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("TIFF64"); #if defined(TIFF_VERSION_BIG) entry->decoder=(DecodeImageHandler *) ReadTIFFImage; entry->encoder=(EncodeImageHandler *) WriteTIFFImage; #endif entry->adjoin=MagickFalse; entry->endian_support=MagickTrue; entry->seekable_stream=MagickTrue; entry->description=ConstantString("Tagged Image File Format (64-bit)"); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/tiff"); entry->module=ConstantString("TIFF"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterTIFFImage() removes format registrations made by the TIFF module % from the list of supported formats. 
% % The format of the UnregisterTIFFImage method is: % % UnregisterTIFFImage(void) % */ ModuleExport void UnregisterTIFFImage(void) { (void) UnregisterMagickInfo("TIFF64"); (void) UnregisterMagickInfo("TIFF"); (void) UnregisterMagickInfo("TIF"); (void) UnregisterMagickInfo("PTIF"); if (tiff_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&tiff_semaphore); LockSemaphoreInfo(tiff_semaphore); if (instantiate_key != MagickFalse) { if (DeleteMagickThreadKey(tiff_exception) == MagickFalse) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); #if defined(MAGICKCORE_HAVE_TIFFMERGEFIELDINFO) && defined(MAGICKCORE_HAVE_TIFFSETTAGEXTENDER) if (tag_extender == (TIFFExtendProc) NULL) (void) TIFFSetTagExtender(tag_extender); #endif (void) TIFFSetWarningHandler(warning_handler); (void) TIFFSetErrorHandler(error_handler); instantiate_key=MagickFalse; } UnlockSemaphoreInfo(tiff_semaphore); DestroySemaphoreInfo(&tiff_semaphore); } #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e G R O U P 4 I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteGROUP4Image() writes an image in the raw CCITT Group 4 image format. % % The format of the WriteGROUP4Image method is: % % MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info, Image *image) { char filename[MaxTextExtent]; FILE *file; Image *huffman_image; ImageInfo *write_info; int unique_file; MagickBooleanType status; register ssize_t i; ssize_t count; TIFF *tiff; toff_t *byte_count, strip_size; unsigned char *buffer; /* Write image as CCITT Group4 TIFF image to a temporary file. 
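Strategy: encode a bilevel clone with the regular TIFF writer into a temporary file, then re-open that file with libtiff and copy its raw, still-compressed strips into the output blob, leaving a bare CCITT Group 4 bitstream.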
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); huffman_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (huffman_image == (Image *) NULL) { (void) CloseBlob(image); return(MagickFalse); } huffman_image->endian=MSBEndian; file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { huffman_image=DestroyImage(huffman_image); ThrowFileException(&image->exception,FileOpenError, "UnableToCreateTemporaryFile",filename); return(MagickFalse); } (void) FormatLocaleString(huffman_image->filename,MaxTextExtent,"tiff:%s", filename); (void) SetImageType(huffman_image,BilevelType); write_info=CloneImageInfo((ImageInfo *) NULL); SetImageInfoFile(write_info,file); (void) SetImageType(image,BilevelType); (void) SetImageDepth(image,1); write_info->compression=Group4Compression; write_info->type=BilevelType; (void) SetImageOption(write_info,"quantum:polarity","min-is-white"); status=WriteTIFFImage(write_info,huffman_image); (void) fflush(file); write_info=DestroyImageInfo(write_info); if (status == MagickFalse) { InheritException(&image->exception,&huffman_image->exception); huffman_image=DestroyImage(huffman_image); (void) fclose(file); (void) RelinquishUniqueFileResource(filename); return(MagickFalse); } tiff=TIFFOpen(filename,"rb"); if (tiff == (TIFF *) NULL) { huffman_image=DestroyImage(huffman_image); (void) fclose(file); (void) RelinquishUniqueFileResource(filename); ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile", image_info->filename); return(MagickFalse); } /* Allocate raw strip buffer. */ if (TIFFGetField(tiff,TIFFTAG_STRIPBYTECOUNTS,&byte_count) != 1) { TIFFClose(tiff); huffman_image=DestroyImage(huffman_image); (void) fclose(file); (void) RelinquishUniqueFileResource(filename); return(MagickFalse); } strip_size=byte_count[0]; for (i=1; i < (ssize_t) TIFFNumberOfStrips(tiff); i++) if (byte_count[i] > strip_size) strip_size=byte_count[i]; buffer=(unsigned char *) AcquireQuantumMemory((size_t) strip_size, sizeof(*buffer)); if (buffer == (unsigned char *) NULL) { TIFFClose(tiff); huffman_image=DestroyImage(huffman_image); (void) fclose(file); (void) RelinquishUniqueFileResource(filename); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image_info->filename); } /* Copy the raw, still Group4-compressed strips to the output blob. */ for (i=0; i < (ssize_t) TIFFNumberOfStrips(tiff); i++) { count=(ssize_t) TIFFReadRawStrip(tiff,(uint32) i,buffer,strip_size); if (WriteBlob(image,(size_t) count,buffer) != count) status=MagickFalse; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); TIFFClose(tiff); huffman_image=DestroyImage(huffman_image); (void) fclose(file); (void) RelinquishUniqueFileResource(filename); (void) CloseBlob(image); return(status); } #endif #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P T I F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePTIFImage() writes an image in the pyramid-encoded Tagged image file % format.
% % The format of the WritePTIFImage method is: % % MagickBooleanType WritePTIFImage(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WritePTIFImage(const ImageInfo *image_info, Image *image) { ExceptionInfo *exception; Image *images, *next, *pyramid_image; ImageInfo *write_info; MagickBooleanType status; PointInfo resolution; size_t columns, rows; /* Create pyramid-encoded TIFF image. */ exception=(&image->exception); images=NewImageList(); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { Image *clone_image; clone_image=CloneImage(next,0,0,MagickFalse,exception); if (clone_image == (Image *) NULL) break; clone_image->previous=NewImageList(); clone_image->next=NewImageList(); (void) SetImageProperty(clone_image,"tiff:subfiletype","none"); AppendImageToList(&images,clone_image); columns=next->columns; rows=next->rows; resolution.x=next->x_resolution; resolution.y=next->y_resolution; while ((columns > 64) && (rows > 64)) { columns/=2; rows/=2; resolution.x/=2.0; resolution.y/=2.0; pyramid_image=ResizeImage(next,columns,rows,image->filter,image->blur, exception); if (pyramid_image == (Image *) NULL) break; pyramid_image->x_resolution=resolution.x; pyramid_image->y_resolution=resolution.y; (void) SetImageProperty(pyramid_image,"tiff:subfiletype","REDUCEDIMAGE"); AppendImageToList(&images,pyramid_image); } } /* Write pyramid-encoded TIFF image. */ write_info=CloneImageInfo(image_info); write_info->adjoin=MagickTrue; status=WriteTIFFImage(write_info,GetFirstImageInList(images)); images=DestroyImageList(images); write_info=DestroyImageInfo(write_info); return(status); } #endif #if defined(MAGICKCORE_TIFF_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W r i t e T I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteTIFFImage() writes an image in the Tagged image file format. % % The format of the WriteTIFFImage method is: % % MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. 
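%
%  WriteTIFFImage() is also the backend for the GROUP4 and PTIF writers
%  above, which prepare an image (or image list) and then delegate to this
%  method.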
% */ typedef struct _TIFFInfo { RectangleInfo tile_geometry; unsigned char *scanline, *scanlines, *pixels; } TIFFInfo; static void DestroyTIFFInfo(TIFFInfo *tiff_info) { assert(tiff_info != (TIFFInfo *) NULL); if (tiff_info->scanlines != (unsigned char *) NULL) tiff_info->scanlines=(unsigned char *) RelinquishMagickMemory( tiff_info->scanlines); if (tiff_info->pixels != (unsigned char *) NULL) tiff_info->pixels=(unsigned char *) RelinquishMagickMemory( tiff_info->pixels); } static MagickBooleanType EncodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(q)-0.5; if (a < 0.0) a+=1.0; b=QuantumScale*GetPixelb(q)-0.5; if (b < 0.0) b+=1.0; SetPixela(q,QuantumRange*a); SetPixelb(q,QuantumRange*b); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetTIFFInfo(const ImageInfo *image_info,TIFF *tiff, TIFFInfo *tiff_info) { const char *option; MagickStatusType flags; uint32 tile_columns, tile_rows; assert(tiff_info != (TIFFInfo *) NULL); (void) ResetMagickMemory(tiff_info,0,sizeof(*tiff_info)); option=GetImageOption(image_info,"tiff:tile-geometry"); if (option == (const char *) NULL) return(MagickTrue); flags=ParseAbsoluteGeometry(option,&tiff_info->tile_geometry); if ((flags & HeightValue) == 0) tiff_info->tile_geometry.height=tiff_info->tile_geometry.width; tile_columns=(uint32) tiff_info->tile_geometry.width; tile_rows=(uint32) tiff_info->tile_geometry.height; TIFFDefaultTileSize(tiff,&tile_columns,&tile_rows); (void) TIFFSetField(tiff,TIFFTAG_TILEWIDTH,tile_columns); (void) TIFFSetField(tiff,TIFFTAG_TILELENGTH,tile_rows); tiff_info->tile_geometry.width=tile_columns; tiff_info->tile_geometry.height=tile_rows; tiff_info->scanlines=(unsigned char *) AcquireQuantumMemory((size_t) tile_rows*TIFFScanlineSize(tiff),sizeof(*tiff_info->scanlines)); tiff_info->pixels=(unsigned char *) AcquireQuantumMemory((size_t) tile_rows*TIFFTileSize(tiff),sizeof(*tiff_info->scanlines)); if ((tiff_info->scanlines == (unsigned char *) NULL) || (tiff_info->pixels == (unsigned char *) NULL)) { DestroyTIFFInfo(tiff_info); return(MagickFalse); } return(MagickTrue); } static int32 TIFFWritePixels(TIFF *tiff,TIFFInfo *tiff_info,ssize_t row, tsample_t sample,Image *image) { int32 status; register ssize_t i; register unsigned char *p, *q; size_t number_tiles, tile_width; ssize_t bytes_per_pixel, j, k, l; if (TIFFIsTiled(tiff) == 0) return(TIFFWriteScanline(tiff,tiff_info->scanline,(uint32) row,sample)); /* Fill scanlines to tile height. */ i=(ssize_t) (row % tiff_info->tile_geometry.height)*TIFFScanlineSize(tiff); (void) CopyMagickMemory(tiff_info->scanlines+i,(char *) tiff_info->scanline, (size_t) TIFFScanlineSize(tiff)); if (((size_t) (row % tiff_info->tile_geometry.height) != (tiff_info->tile_geometry.height-1)) && (row != (ssize_t) (image->rows-1))) return(0); /* Write tile to TIFF image. 
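    Scanlines have been accumulated to a full tile height (or to the bottom
    of the image); slice the accumulated rows into tile-width chunks and
    emit each chunk with TIFFWriteTile().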
*/ status=0; bytes_per_pixel=TIFFTileSize(tiff)/(ssize_t) (tiff_info->tile_geometry.height* tiff_info->tile_geometry.width); number_tiles=(image->columns+tiff_info->tile_geometry.width)/ tiff_info->tile_geometry.width; for (i=0; i < (ssize_t) number_tiles; i++) { tile_width=(i == (ssize_t) (number_tiles-1)) ? image->columns-(i* tiff_info->tile_geometry.width) : tiff_info->tile_geometry.width; for (j=0; j < (ssize_t) ((row % tiff_info->tile_geometry.height)+1); j++) for (k=0; k < (ssize_t) tile_width; k++) { if (bytes_per_pixel == 0) { p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)/8); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k/8); *q++=(*p++); continue; } p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i* tiff_info->tile_geometry.width+k)*bytes_per_pixel); q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k*bytes_per_pixel); for (l=0; l < bytes_per_pixel; l++) *q++=(*p++); } if ((i*tiff_info->tile_geometry.width) != image->columns) status=TIFFWriteTile(tiff,tiff_info->pixels,(uint32) (i* tiff_info->tile_geometry.width),(uint32) ((row/ tiff_info->tile_geometry.height)*tiff_info->tile_geometry.height),0, sample); if (status < 0) break; } return(status); } static void TIFFSetProfiles(TIFF *tiff,Image *image) { const char *name; const StringInfo *profile; if (image->profiles == (void *) NULL) return; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (GetStringInfoLength(profile) == 0) { name=GetNextImageProfile(image); continue; } #if defined(TIFFTAG_XMLPACKET) if (LocaleCompare(name,"xmp") == 0) (void) TIFFSetField(tiff,TIFFTAG_XMLPACKET,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif #if defined(TIFFTAG_ICCPROFILE) if (LocaleCompare(name,"icc") == 0) (void) TIFFSetField(tiff,TIFFTAG_ICCPROFILE,(uint32) GetStringInfoLength( profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"iptc") == 0) { size_t length; StringInfo *iptc_profile; iptc_profile=CloneStringInfo(profile); length=GetStringInfoLength(profile)+4-(GetStringInfoLength(profile) & 0x03); SetStringInfoLength(iptc_profile,length); if (TIFFIsByteSwapped(tiff)) TIFFSwabArrayOfLong((uint32 *) GetStringInfoDatum(iptc_profile), (unsigned long) (length/4)); (void) TIFFSetField(tiff,TIFFTAG_RICHTIFFIPTC,(uint32) GetStringInfoLength(iptc_profile)/4,GetStringInfoDatum(iptc_profile)); iptc_profile=DestroyStringInfo(iptc_profile); } #if defined(TIFFTAG_PHOTOSHOP) if (LocaleCompare(name,"8bim") == 0) (void) TIFFSetField(tiff,TIFFTAG_PHOTOSHOP,(uint32) GetStringInfoLength(profile),GetStringInfoDatum(profile)); #endif if (LocaleCompare(name,"tiff:37724") == 0) (void) TIFFSetField(tiff,37724,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); if (LocaleCompare(name,"tiff:34118") == 0) (void) TIFFSetField(tiff,34118,(uint32) GetStringInfoLength(profile), GetStringInfoDatum(profile)); name=GetNextImageProfile(image); } } static void TIFFSetProperties(TIFF *tiff,const ImageInfo *image_info, Image *image) { const char *value; value=GetImageArtifact(image,"tiff:document"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DOCUMENTNAME,value); value=GetImageArtifact(image,"tiff:hostcomputer"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_HOSTCOMPUTER,value); value=GetImageArtifact(image,"tiff:artist"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_ARTIST,value); 
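  /*
    The remaining "tiff:*" artifacts map onto their corresponding TIFF tags.
  */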
value=GetImageArtifact(image,"tiff:timestamp"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_DATETIME,value); value=GetImageArtifact(image,"tiff:make"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MAKE,value); value=GetImageArtifact(image,"tiff:model"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_MODEL,value); value=GetImageArtifact(image,"tiff:software"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_SOFTWARE,value); value=GetImageArtifact(image,"tiff:copyright"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_COPYRIGHT,value); value=GetImageArtifact(image,"kodak-33423"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,33423,value); value=GetImageArtifact(image,"kodak-36867"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,36867,value); value=GetImageProperty(image,"label"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_PAGENAME,value); value=GetImageProperty(image,"comment"); if (value != (const char *) NULL) (void) TIFFSetField(tiff,TIFFTAG_IMAGEDESCRIPTION,value); value=GetImageArtifact(image,"tiff:subfiletype"); if (value != (const char *) NULL) { if (LocaleCompare(value,"REDUCEDIMAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE); else if (LocaleCompare(value,"PAGE") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); else if (LocaleCompare(value,"MASK") == 0) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_MASK); } else { uint16 page, pages; page=(uint16) image->scene; pages=(uint16) GetImageListLength(image); if ((image_info->adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } } static void TIFFSetEXIFProperties(TIFF *tiff,Image *image) { #if defined(MAGICKCORE_HAVE_TIFFREADEXIFDIRECTORY) const char *value; register ssize_t i; uint32 offset; /* Write EXIF properties. */ offset=0; (void) TIFFSetField(tiff,TIFFTAG_SUBIFD,1,&offset); for (i=0; exif_info[i].tag != 0; i++) { value=GetImageProperty(image,exif_info[i].property); if (value == (const char *) NULL) continue; switch (exif_info[i].type) { case TIFF_ASCII: { (void) TIFFSetField(tiff,exif_info[i].tag,value); break; } case TIFF_SHORT: { uint16 field; field=(uint16) StringToLong(value); (void) TIFFSetField(tiff,exif_info[i].tag,field); break; } case TIFF_LONG: { uint16 field; field=(uint16) StringToLong(value); (void) TIFFSetField(tiff,exif_info[i].tag,field); break; } case TIFF_RATIONAL: case TIFF_SRATIONAL: { float field; field=StringToDouble(value,(char **) NULL); (void) TIFFSetField(tiff,exif_info[i].tag,field); break; } default: break; } } /* (void) TIFFSetField(tiff,TIFFTAG_EXIFIFD,offset); */ #else (void) tiff; (void) image; #endif } static MagickBooleanType WriteTIFFImage(const ImageInfo *image_info, Image *image) { #if !defined(TIFFDefaultStripSize) #define TIFFDefaultStripSize(tiff,request) (8192UL/TIFFScanlineSize(tiff)) #endif const char *mode, *option; CompressionType compression; EndianType endian_type; MagickBooleanType debug, status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; ssize_t y; TIFF *tiff; TIFFInfo tiff_info; uint16 bits_per_sample, compress_tag, endian, photometric; uint32 rows_per_strip; unsigned char *pixels; /* Open TIFF file. 
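    The TIFFClientOpen() mode encodes the requested byte order ("wl" for
    little-endian, "wb" for big-endian) and, for TIFF64, the BigTIFF variant
    ("w8"); see the mode selection below.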
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) SetMagickThreadValue(tiff_exception,&image->exception);
  endian_type=UndefinedEndian;
  option=GetImageOption(image_info,"tiff:endian");
  if (option != (const char *) NULL)
    {
      if (LocaleNCompare(option,"msb",3) == 0)
        endian_type=MSBEndian;
      if (LocaleNCompare(option,"lsb",3) == 0)
        endian_type=LSBEndian;
    }
  switch (endian_type)
  {
    case LSBEndian: mode="wl"; break;
    case MSBEndian: mode="wb"; break;
    default: mode="w"; break;
  }
#if defined(TIFF_VERSION_BIG)
  if (LocaleCompare(image_info->magick,"TIFF64") == 0)
    switch (endian_type)
    {
      case LSBEndian: mode="wl8"; break;
      case MSBEndian: mode="wb8"; break;
      default: mode="w8"; break;
    }
#endif
  tiff=TIFFClientOpen(image->filename,mode,(thandle_t) image,TIFFReadBlob,
    TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
    TIFFUnmapBlob);
  if (tiff == (TIFF *) NULL)
    return(MagickFalse);
  scene=0;
  debug=IsEventLogging();
  (void) debug;
  do
  {
    /*
      Initialize TIFF fields.
    */
    if ((image_info->type != UndefinedType) &&
        (image_info->type != OptimizeType))
      (void) SetImageType(image,image_info->type);
    compression=UndefinedCompression;
    if (image->compression != JPEGCompression)
      compression=image->compression;
    if (image_info->compression != UndefinedCompression)
      compression=image_info->compression;
    switch (compression)
    {
      case FaxCompression:
      case Group4Compression:
      {
        (void) SetImageType(image,BilevelType);
        (void) SetImageDepth(image,1);
        break;
      }
      case JPEGCompression:
      {
        (void) SetImageStorageClass(image,DirectClass);
        (void) SetImageDepth(image,8);
        break;
      }
      default:
        break;
    }
    quantum_info=AcquireQuantumInfo(image_info,image);
    if (quantum_info == (QuantumInfo *) NULL)
      ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
    if ((image->storage_class != PseudoClass) && (image->depth >= 32) &&
        (quantum_info->format == UndefinedQuantumFormat) &&
        (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse))
      {
        status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
        if (status == MagickFalse)
          ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
      }
    if ((LocaleCompare(image_info->magick,"PTIF") == 0) &&
        (GetPreviousImageInList(image) != (Image *) NULL))
      (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_REDUCEDIMAGE);
    if ((image->columns != (uint32) image->columns) ||
        (image->rows != (uint32) image->rows))
      ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
    (void) TIFFSetField(tiff,TIFFTAG_IMAGELENGTH,(uint32) image->rows);
    (void) TIFFSetField(tiff,TIFFTAG_IMAGEWIDTH,(uint32) image->columns);
    switch (compression)
    {
      case FaxCompression:
      {
        compress_tag=COMPRESSION_CCITTFAX3;
        SetQuantumMinIsWhite(quantum_info,MagickTrue);
        break;
      }
      case Group4Compression:
      {
        compress_tag=COMPRESSION_CCITTFAX4;
        SetQuantumMinIsWhite(quantum_info,MagickTrue);
        break;
      }
#if defined(COMPRESSION_JBIG)
      case JBIG1Compression:
      {
        compress_tag=COMPRESSION_JBIG;
        break;
      }
#endif
      case JPEGCompression:
      {
        compress_tag=COMPRESSION_JPEG;
        break;
      }
#if defined(COMPRESSION_LZMA)
      case LZMACompression:
      {
        compress_tag=COMPRESSION_LZMA;
        break;
      }
#endif
      case LZWCompression:
      {
        compress_tag=COMPRESSION_LZW;
        break;
      }
      case RLECompression:
      {
compress_tag=COMPRESSION_PACKBITS; break; } case ZipCompression: { compress_tag=COMPRESSION_ADOBE_DEFLATE; break; } case NoCompression: default: { compress_tag=COMPRESSION_NONE; break; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; } #else switch (compress_tag) { #if defined(CCITT_SUPPORT) case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: #endif #if defined(YCBCR_SUPPORT) && defined(JPEG_SUPPORT) case COMPRESSION_JPEG: #endif #if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: #endif #if defined(LZW_SUPPORT) case COMPRESSION_LZW: #endif #if defined(PACKBITS_SUPPORT) case COMPRESSION_PACKBITS: #endif #if defined(ZIP_SUPPORT) case COMPRESSION_ADOBE_DEFLATE: #endif case COMPRESSION_NONE: break; default: { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CompressionNotSupported","`%s'",CommandOptionToMnemonic( MagickCompressOptions,(ssize_t) compression)); compress_tag=COMPRESSION_NONE; break; } } #endif if (image->colorspace == CMYKColorspace) { photometric=PHOTOMETRIC_SEPARATED; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,4); (void) TIFFSetField(tiff,TIFFTAG_INKSET,INKSET_CMYK); } else { /* Full color TIFF raster. */ if (image->colorspace == LabColorspace) { photometric=PHOTOMETRIC_CIELAB; EncodeLabImage(image,&image->exception); } else if (image->colorspace == YCbCrColorspace) { photometric=PHOTOMETRIC_YCBCR; (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,1,1); (void) SetImageStorageClass(image,DirectClass); (void) SetImageDepth(image,8); } else photometric=PHOTOMETRIC_RGB; (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,3); if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType)) { if ((image_info->type != PaletteType) && (SetImageGray(image,&image->exception) != MagickFalse)) { photometric=(uint16) (quantum_info->min_is_white != MagickFalse ? PHOTOMETRIC_MINISWHITE : PHOTOMETRIC_MINISBLACK); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); if ((image->depth == 1) && (image->matte == MagickFalse)) SetImageMonochrome(image,&image->exception); } else if (image->storage_class == PseudoClass) { size_t depth; /* Colormapped TIFF raster. 
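    The quantum depth is doubled until the colormap length fits, so palette
    indices are written with the smallest sufficient bit depth.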
*/ (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,1); photometric=PHOTOMETRIC_PALETTE; depth=1; while ((GetQuantumRange(depth)+1) < image->colors) depth<<=1; status=SetQuantumDepth(image,quantum_info,depth); if (status == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); } } } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian); if ((compress_tag == COMPRESSION_CCITTFAX3) && (photometric != PHOTOMETRIC_MINISWHITE)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } else if ((compress_tag == COMPRESSION_CCITTFAX4) && (photometric != PHOTOMETRIC_MINISWHITE)) { compress_tag=COMPRESSION_NONE; endian=FILLORDER_MSB2LSB; } option=GetImageOption(image_info,"tiff:fill-order"); if (option != (const char *) NULL) { if (LocaleNCompare(option,"msb",3) == 0) endian=FILLORDER_MSB2LSB; if (LocaleNCompare(option,"lsb",3) == 0) endian=FILLORDER_LSB2MSB; } (void) TIFFSetField(tiff,TIFFTAG_COMPRESSION,compress_tag); (void) TIFFSetField(tiff,TIFFTAG_FILLORDER,endian); (void) TIFFSetField(tiff,TIFFTAG_BITSPERSAMPLE,quantum_info->depth); if (image->matte != MagickFalse) { uint16 extra_samples, sample_info[1], samples_per_pixel; /* TIFF has a matte channel. */ extra_samples=1; sample_info[0]=EXTRASAMPLE_UNASSALPHA; option=GetImageOption(image_info,"tiff:alpha"); if (option != (const char *) NULL) { if (LocaleCompare(option,"associated") == 0) sample_info[0]=EXTRASAMPLE_ASSOCALPHA; else if (LocaleCompare(option,"unspecified") == 0) sample_info[0]=EXTRASAMPLE_UNSPECIFIED; } (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); (void) TIFFSetField(tiff,TIFFTAG_SAMPLESPERPIXEL,samples_per_pixel+1); (void) TIFFSetField(tiff,TIFFTAG_EXTRASAMPLES,extra_samples, &sample_info); if (sample_info[0] == EXTRASAMPLE_ASSOCALPHA) SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha); } (void) TIFFSetField(tiff,TIFFTAG_PHOTOMETRIC,photometric); switch (quantum_info->format) { case FloatingPointQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_IEEEFP); (void) TIFFSetField(tiff,TIFFTAG_SMINSAMPLEVALUE,quantum_info->minimum); (void) TIFFSetField(tiff,TIFFTAG_SMAXSAMPLEVALUE,quantum_info->maximum); break; } case SignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_INT); break; } case UnsignedQuantumFormat: { (void) TIFFSetField(tiff,TIFFTAG_SAMPLEFORMAT,SAMPLEFORMAT_UINT); break; } default: break; } (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,ORIENTATION_TOPLEFT); (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_CONTIG); if (photometric == PHOTOMETRIC_RGB) if ((image_info->interlace == PlaneInterlace) || (image_info->interlace == PartitionInterlace)) (void) TIFFSetField(tiff,TIFFTAG_PLANARCONFIG,PLANARCONFIG_SEPARATE); rows_per_strip=TIFFDefaultStripSize(tiff,0); option=GetImageOption(image_info,"tiff:rows-per-strip"); if (option != (const char *) NULL) rows_per_strip=(size_t) strtol(option,(char **) NULL,10); switch (compress_tag) { case COMPRESSION_JPEG: { #if defined(JPEG_SUPPORT) const char *sampling_factor; GeometryInfo geometry_info; MagickStatusType flags; rows_per_strip+=(16-(rows_per_strip % 16)); if (image_info->quality != UndefinedCompressionQuality) (void) TIFFSetField(tiff,TIFFTAG_JPEGQUALITY,image_info->quality); (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RAW); if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { const char *value; (void) TIFFSetField(tiff,TIFFTAG_JPEGCOLORMODE,JPEGCOLORMODE_RGB); sampling_factor=(const char *) NULL; 
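          /*
            Prefer a sampling factor recorded on the image; an explicit
            image_info->sampling_factor overrides it below.
          */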
            value=GetImageProperty(image,"jpeg:sampling-factor");
            if (value != (char *) NULL)
              {
                sampling_factor=value;
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " Input sampling-factors=%s",sampling_factor);
              }
            if (image_info->sampling_factor != (char *) NULL)
              sampling_factor=image_info->sampling_factor;
            if (sampling_factor != (const char *) NULL)
              {
                flags=ParseGeometry(sampling_factor,&geometry_info);
                if ((flags & SigmaValue) == 0)
                  geometry_info.sigma=geometry_info.rho;
                if (image->colorspace == YCbCrColorspace)
                  (void) TIFFSetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,(uint16)
                    geometry_info.rho,(uint16) geometry_info.sigma);
              }
          }
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample);
        if (bits_per_sample == 12)
          (void) TIFFSetField(tiff,TIFFTAG_JPEGTABLESMODE,JPEGTABLESMODE_QUANT);
#endif
        break;
      }
      case COMPRESSION_ADOBE_DEFLATE:
      {
        rows_per_strip=(uint32) image->rows;
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
        (void) TIFFSetField(tiff,TIFFTAG_ZIPQUALITY,(long) (
          image_info->quality == UndefinedCompressionQuality ? 7 :
          MagickMin((ssize_t) image_info->quality/10,9)));
        break;
      }
      case COMPRESSION_CCITTFAX3:
      {
        /*
          Byte-aligned EOL.
        */
        rows_per_strip=(uint32) image->rows;
        (void) TIFFSetField(tiff,TIFFTAG_GROUP3OPTIONS,4);
        break;
      }
      case COMPRESSION_CCITTFAX4:
      {
        rows_per_strip=(uint32) image->rows;
        break;
      }
#if defined(LZMA_SUPPORT) && defined(COMPRESSION_LZMA)
      case COMPRESSION_LZMA:
      {
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
        (void) TIFFSetField(tiff,TIFFTAG_LZMAPRESET,(long) (
          image_info->quality == UndefinedCompressionQuality ? 7 :
          MagickMin((ssize_t) image_info->quality/10,9)));
        break;
      }
#endif
      case COMPRESSION_LZW:
      {
        (void) TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,
          &bits_per_sample);
        if (((photometric == PHOTOMETRIC_RGB) ||
             (photometric == PHOTOMETRIC_MINISBLACK)) &&
            ((bits_per_sample == 8) || (bits_per_sample == 16)))
          (void) TIFFSetField(tiff,TIFFTAG_PREDICTOR,PREDICTOR_HORIZONTAL);
        break;
      }
      default:
        break;
    }
    if (rows_per_strip < 1)
      rows_per_strip=1;
    if ((image->rows/rows_per_strip) >= (1UL << 15))
      rows_per_strip=(uint32) (image->rows >> 15);
    (void) TIFFSetField(tiff,TIFFTAG_ROWSPERSTRIP,rows_per_strip);
    if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0))
      {
        unsigned short
          units;

        /*
          Set image resolution.
        */
        units=RESUNIT_NONE;
        if (image->units == PixelsPerInchResolution)
          units=RESUNIT_INCH;
        if (image->units == PixelsPerCentimeterResolution)
          units=RESUNIT_CENTIMETER;
        (void) TIFFSetField(tiff,TIFFTAG_RESOLUTIONUNIT,(uint16) units);
        (void) TIFFSetField(tiff,TIFFTAG_XRESOLUTION,image->x_resolution);
        (void) TIFFSetField(tiff,TIFFTAG_YRESOLUTION,image->y_resolution);
        if ((image->page.x < 0) || (image->page.y < 0))
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            CoderError,"TIFF: negative image positions unsupported","%s",
            image->filename);
        if ((image->page.x > 0) && (image->x_resolution > 0.0))
          {
            /*
              Set horizontal image position.
            */
            (void) TIFFSetField(tiff,TIFFTAG_XPOSITION,(float) image->page.x/
              image->x_resolution);
          }
        if ((image->page.y > 0) && (image->y_resolution > 0.0))
          {
            /*
              Set vertical image position.
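              As with the horizontal offset above, the page offset in pixels
              is divided by the resolution to obtain the value for
              TIFFTAG_YPOSITION.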
*/ (void) TIFFSetField(tiff,TIFFTAG_YPOSITION,(float) image->page.y/ image->y_resolution); } } if (image->chromaticity.white_point.x != 0.0) { float chromaticity[6]; /* Set image chromaticity. */ chromaticity[0]=(float) image->chromaticity.red_primary.x; chromaticity[1]=(float) image->chromaticity.red_primary.y; chromaticity[2]=(float) image->chromaticity.green_primary.x; chromaticity[3]=(float) image->chromaticity.green_primary.y; chromaticity[4]=(float) image->chromaticity.blue_primary.x; chromaticity[5]=(float) image->chromaticity.blue_primary.y; (void) TIFFSetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,chromaticity); chromaticity[0]=(float) image->chromaticity.white_point.x; chromaticity[1]=(float) image->chromaticity.white_point.y; (void) TIFFSetField(tiff,TIFFTAG_WHITEPOINT,chromaticity); } if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (GetImageListLength(image) > 1)) { (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); if (image->scene != 0) (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,(uint16) image->scene, GetImageListLength(image)); } if (image->orientation != UndefinedOrientation) (void) TIFFSetField(tiff,TIFFTAG_ORIENTATION,(uint16) image->orientation); (void) TIFFSetProfiles(tiff,image); { uint16 page, pages; page=(uint16) scene; pages=(uint16) GetImageListLength(image); if ((LocaleCompare(image_info->magick,"PTIF") != 0) && (image_info->adjoin != MagickFalse) && (pages > 1)) (void) TIFFSetField(tiff,TIFFTAG_SUBFILETYPE,FILETYPE_PAGE); (void) TIFFSetField(tiff,TIFFTAG_PAGENUMBER,page,pages); } (void) TIFFSetProperties(tiff,image_info,image); DisableMSCWarning(4127) if (0) RestoreMSCWarning (void) TIFFSetEXIFProperties(tiff,image); /* Write image scanlines. */ if (GetTIFFInfo(image_info,tiff,&tiff_info) == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); quantum_info->endian=LSBEndian; pixels=GetQuantumPixels(quantum_info); tiff_info.scanline=GetQuantumPixels(quantum_info); switch (photometric) { case PHOTOMETRIC_CIELAB: case PHOTOMETRIC_YCBCR: case PHOTOMETRIC_RGB: { /* RGB TIFF image. */ switch (image_info->interlace) { case NoInterlace: default: { quantum_type=RGBQuantum; if (image->matte != MagickFalse) quantum_type=RGBAQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } break; } case PlaneInterlace: case PartitionInterlace: { /* Plane interlacing: RRRRRR...GGGGGG...BBBBBB... 
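              Each channel is exported separately and written as its own
              plane, using sample indices 0 (red) through 3 (alpha) in
              TIFFWritePixels().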
*/ for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,100,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GreenQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,1,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,200,400); if (status == MagickFalse) break; } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,BlueQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,2,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,300,400); if (status == MagickFalse) break; } if (image->matte != MagickFalse) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,AlphaQuantum,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,3,image) == -1) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,400,400); if (status == MagickFalse) break; } break; } } break; } case PHOTOMETRIC_SEPARATED: { /* CMYK TIFF image. */ quantum_type=CMYKQuantum; if (image->matte != MagickFalse) quantum_type=CMYKAQuantum; if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case PHOTOMETRIC_PALETTE: { uint16 *blue, *green, *red; /* Colormapped TIFF image. */ red=(uint16 *) AcquireQuantumMemory(65536,sizeof(*red)); green=(uint16 *) AcquireQuantumMemory(65536,sizeof(*green)); blue=(uint16 *) AcquireQuantumMemory(65536,sizeof(*blue)); if ((red == (uint16 *) NULL) || (green == (uint16 *) NULL) || (blue == (uint16 *) NULL)) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize TIFF colormap. 
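        TIFFTAG_COLORMAP expects three full-length arrays of 16-bit values,
        so each colormap entry is scaled with ScaleQuantumToShort().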
*/ (void) ResetMagickMemory(red,0,65536*sizeof(*red)); (void) ResetMagickMemory(green,0,65536*sizeof(*green)); (void) ResetMagickMemory(blue,0,65536*sizeof(*blue)); for (i=0; i < (ssize_t) image->colors; i++) { red[i]=ScaleQuantumToShort(image->colormap[i].red); green[i]=ScaleQuantumToShort(image->colormap[i].green); blue[i]=ScaleQuantumToShort(image->colormap[i].blue); } (void) TIFFSetField(tiff,TIFFTAG_COLORMAP,red,green,blue); red=(uint16 *) RelinquishMagickMemory(red); green=(uint16 *) RelinquishMagickMemory(green); blue=(uint16 *) RelinquishMagickMemory(blue); } default: { /* Convert PseudoClass packets to contiguous grayscale scanlines. */ quantum_type=IndexQuantum; if (image->matte != MagickFalse) { if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayAlphaQuantum; else quantum_type=IndexAlphaQuantum; } else if (photometric != PHOTOMETRIC_PALETTE) quantum_type=GrayQuantum; for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,quantum_type,pixels,&image->exception); if (TIFFWritePixels(tiff,&tiff_info,y,0,image) == -1) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } } quantum_info=DestroyQuantumInfo(quantum_info); if (image->colorspace == LabColorspace) DecodeLabImage(image,&image->exception); DestroyTIFFInfo(&tiff_info); DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) RestoreMSCWarning TIFFPrintDirectory(tiff,stdout,MagickFalse); (void) TIFFWriteDirectory(tiff); image=SyncNextImageInList(image); if (image == (Image *) NULL) break; status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); TIFFClose(tiff); return(MagickTrue); } #endif
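/*
  Illustrative sketch (guarded out of the build, not part of the coder
  above): one way a caller might drive this writer through the MagickCore
  API.  The input/output file names and the tile geometry value are
  assumptions for the example; only the "tiff:tile-geometry" option name
  comes from GetTIFFInfo() above.
*/
#if 0
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Request tiled output; GetTIFFInfo() reads this option and sets
        TIFFTAG_TILEWIDTH/TIFFTAG_TILELENGTH accordingly.
      */
      (void) SetImageOption(image_info,"tiff:tile-geometry","256x256");
      (void) CopyMagickString(image->filename,"output.tif",MaxTextExtent);
      if (WriteImage(image_info,image) == MagickFalse)
        CatchImageException(image);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif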
./CrossVul/dataset_final_sorted/CWE-119/c/bad_4784_0
crossvul-cpp_data_bad_4774_0
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                            RRRR   L      EEEEE                              %
%                            R   R  L      E                                  %
%                            RRRR   L      EEE                                %
%                            R  R   L      E                                  %
%                            R   R  LLLLL  EEEEE                              %
%                                                                             %
%                                                                             %
%                         Read URT RLE Image Format                           %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/quantum-private.h"
#include "magick/pixel.h"
#include "magick/property.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/module.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s R L E                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsRLE() returns MagickTrue if the image format type, identified by the
%  magick string, is RLE.
%
%  The format of the IsRLE method is:
%
%      MagickBooleanType IsRLE(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
%
*/
static MagickBooleanType IsRLE(const unsigned char *magick,const size_t length)
{
  if (length < 2)
    return(MagickFalse);
  if (memcmp(magick,"\122\314",2) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d R L E I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadRLEImage() reads a run-length encoded Utah Raster Toolkit
%  image file and returns it.  It allocates the memory necessary for the new
%  Image structure and returns a pointer to the new image.
%
%  The format of the ReadRLEImage method is:
%
%      Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
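%
%  The decoder expands the URT opcode stream (skip lines, set color, skip
%  pixels, byte data, run data) into a whole-image staging buffer and then
%  converts that buffer into DirectClass or PseudoClass pixels.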
% % */ static Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define SkipLinesOp 0x01 #define SetColorOp 0x02 #define SkipPixelsOp 0x03 #define ByteDataOp 0x05 #define RunDataOp 0x06 #define EOFOp 0x07 char magick[12]; Image *image; IndexPacket index; int opcode, operand, status; MagickStatusType flags; MagickSizeType number_pixels; MemoryInfo *pixel_info; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; register unsigned char *p; size_t bits_per_pixel, map_length, number_colormaps, number_planes, number_planes_filled, one, offset, pixel_info_length; ssize_t count, y; unsigned char background_color[256], *colormap, pixel, plane, *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Determine if this a RLE file. */ count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 2) || (memcmp(magick,"\122\314",2) != 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); do { /* Read image header. */ image->page.x=ReadBlobLSBShort(image); image->page.y=ReadBlobLSBShort(image); image->columns=ReadBlobLSBShort(image); image->rows=ReadBlobLSBShort(image); flags=(MagickStatusType) ReadBlobByte(image); image->matte=flags & 0x04 ? MagickTrue : MagickFalse; number_planes=(size_t) ReadBlobByte(image); bits_per_pixel=(size_t) ReadBlobByte(image); number_colormaps=(size_t) ReadBlobByte(image); map_length=(unsigned char) ReadBlobByte(image); if (map_length >= 32) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); one=1; map_length=one << map_length; if ((number_planes == 0) || (number_planes == 2) || ((flags & 0x04) && (number_colormaps > 254)) || (bits_per_pixel != 8) || (image->columns == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (flags & 0x02) { /* No background color-- initialize to black. */ for (i=0; i < (ssize_t) number_planes; i++) background_color[i]=0; (void) ReadBlobByte(image); } else { /* Initialize background color. */ p=background_color; for (i=0; i < (ssize_t) number_planes; i++) *p++=(unsigned char) ReadBlobByte(image); } if ((number_planes & 0x01) == 0) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } colormap=(unsigned char *) NULL; if (number_colormaps != 0) { /* Read image colormaps. */ colormap=(unsigned char *) AcquireQuantumMemory(number_colormaps, 3*map_length*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; for (i=0; i < (ssize_t) number_colormaps; i++) for (x=0; x < (ssize_t) map_length; x++) *p++=(unsigned char) ScaleShortToQuantum(ReadBlobLSBShort(image)); } if ((flags & 0x08) != 0) { char *comment; size_t length; /* Read image comment. 
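          The comment block is a 16-bit length followed by the comment
          text, padded to an even byte count.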
*/ length=ReadBlobLSBShort(image); if (length != 0) { comment=(char *) AcquireQuantumMemory(length,sizeof(*comment)); if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,length-1,(unsigned char *) comment); comment[length-1]='\0'; (void) SetImageProperty(image,"comment",comment); comment=DestroyString(comment); if ((length & 0x01) == 0) (void) ReadBlobByte(image); } } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Allocate RLE pixels. */ if (image->matte != MagickFalse) number_planes++; number_pixels=(MagickSizeType) image->columns*image->rows; number_planes_filled=(number_planes % 2 == 0) ? number_planes : number_planes+1; if ((number_pixels*number_planes_filled) != (size_t) (number_pixels* number_planes_filled)) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory(image->columns,image->rows* number_planes_filled*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info_length=image->columns*image->rows*number_planes_filled; pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((flags & 0x01) && !(flags & 0x02)) { ssize_t j; /* Set background color. */ p=pixels; for (i=0; i < (ssize_t) number_pixels; i++) { if (image->matte == MagickFalse) for (j=0; j < (ssize_t) number_planes; j++) *p++=background_color[j]; else { for (j=0; j < (ssize_t) (number_planes-1); j++) *p++=background_color[j]; *p++=0; /* initialize matte channel */ } } } /* Read runlength-encoded image. 
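      Bit 0x40 of an opcode indicates that its operand is a 16-bit
      little-endian quantity rather than a single byte.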
*/ plane=0; x=0; y=0; opcode=ReadBlobByte(image); do { switch (opcode & 0x3f) { case SkipLinesOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); x=0; y+=operand; break; } case SetColorOp: { operand=ReadBlobByte(image); plane=(unsigned char) operand; if (plane == 255) plane=(unsigned char) (number_planes-1); x=0; break; } case SkipPixelsOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); x+=operand; break; } case ByteDataOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); offset=((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane; operand++; if (offset+((size_t) operand*number_planes) > pixel_info_length) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } p=pixels+offset; for (i=0; i < (ssize_t) operand; i++) { pixel=(unsigned char) ReadBlobByte(image); if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } if (operand & 0x01) (void) ReadBlobByte(image); x+=operand; break; } case RunDataOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); pixel=(unsigned char) ReadBlobByte(image); (void) ReadBlobByte(image); operand++; offset=((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane; p=pixels+offset; if (offset+((size_t) operand*number_planes) > pixel_info_length) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } for (i=0; i < (ssize_t) operand; i++) { if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } x+=operand; break; } default: break; } opcode=ReadBlobByte(image); } while (((opcode & 0x3f) != EOFOp) && (opcode != EOF)); if (number_colormaps != 0) { MagickStatusType mask; /* Apply colormap affineation to image. */ mask=(MagickStatusType) (map_length-1); p=pixels; x=(ssize_t) number_planes; if (number_colormaps == 1) for (i=0; i < (ssize_t) number_pixels; i++) { if (IsValidColormapIndex(image,*p & mask,&index,exception) == MagickFalse) break; *p=colormap[(ssize_t) index]; p++; } else if ((number_planes >= 3) && (number_colormaps >= 3)) for (i=0; i < (ssize_t) number_pixels; i++) for (x=0; x < (ssize_t) number_planes; x++) { if (IsValidColormapIndex(image,(size_t) (x*map_length+ (*p & mask)),&index,exception) == MagickFalse) break; *p=colormap[(ssize_t) index]; p++; } if ((i < (ssize_t) number_pixels) || (x < (ssize_t) number_planes)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } } /* Initialize image structure. */ if (number_planes >= 3) { /* Convert raster image to DirectClass pixel packets. 
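          Planes are interleaved per pixel in the staging buffer, so red,
          green, blue, and (optionally) alpha are consumed consecutively
          for each pixel.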
*/ p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(*p++)); SetPixelGreen(q,ScaleCharToQuantum(*p++)); SetPixelBlue(q,ScaleCharToQuantum(*p++)); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleCharToQuantum(*p++)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else { /* Create colormap. */ if (number_colormaps == 0) map_length=256; if (AcquireImageColormap(image,map_length) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; if (number_colormaps == 1) for (i=0; i < (ssize_t) image->colors; i++) { /* Pseudocolor. */ image->colormap[i].red=ScaleCharToQuantum((unsigned char) i); image->colormap[i].green=ScaleCharToQuantum((unsigned char) i); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i); } else if (number_colormaps > 1) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p); image->colormap[i].green=ScaleCharToQuantum(*(p+map_length)); image->colormap[i].blue=ScaleCharToQuantum(*(p+map_length*2)); p++; } p=pixels; if (image->matte == MagickFalse) { /* Convert raster image to PseudoClass pixel packets. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*p++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); } else { /* Image has a matte channel-- promote to DirectClass. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelRed(q,image->colormap[(ssize_t) index].red); if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelGreen(q,image->colormap[(ssize_t) index].green); if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelBlue(q,image->colormap[(ssize_t) index].blue); SetPixelAlpha(q,ScaleCharToQuantum(*p++)); q++; } if (x < (ssize_t) image->columns) break; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } image->colormap=(PixelPacket *) RelinquishMagickMemory( image->colormap); image->storage_class=DirectClass; image->colors=0; } } if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. 
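      Concatenated RLE images are detected by re-reading the two magic
      bytes after the EOF opcode.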
*/ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; (void) ReadBlobByte(image); count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 0) && (memcmp(magick,"\122\314",2) == 0)) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (memcmp(magick,"\122\314",2) == 0)); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r R L E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterRLEImage() adds attributes for the RLE image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterRLEImage method is: % % size_t RegisterRLEImage(void) % */ ModuleExport size_t RegisterRLEImage(void) { MagickInfo *entry; entry=SetMagickInfo("RLE"); entry->decoder=(DecodeImageHandler *) ReadRLEImage; entry->magick=(IsImageFormatHandler *) IsRLE; entry->adjoin=MagickFalse; entry->description=ConstantString("Utah Run length encoded image"); entry->module=ConstantString("RLE"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r R L E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterRLEImage() removes format registrations made by the % RLE module from the list of supported formats. % % The format of the UnregisterRLEImage method is: % % UnregisterRLEImage(void) % */ ModuleExport void UnregisterRLEImage(void) { (void) UnregisterMagickInfo("RLE"); }
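
/*
  Illustrative sketch (guarded out of the build, not part of the module
  above): how the registered RLE coder is reached through the MagickCore
  API.  The input file name is an assumption for the example.
*/
#if 0
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  /*
    The ".rle" suffix (or the two magic bytes, via IsRLE()) selects
    ReadRLEImage() from the coder registry.
  */
  (void) CopyMagickString(image_info->filename,"input.rle",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      (void) fprintf(stdout,"%.20gx%.20g\n",(double) image->columns,
        (double) image->rows);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif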
./CrossVul/dataset_final_sorted/CWE-119/c/bad_4774_0
crossvul-cpp_data_bad_342_8
/* * cryptoflex-tool.c: Tool for doing various Cryptoflex related stuff * * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "config.h" #include "libopensc/sc-ossl-compat.h" #include <openssl/bn.h> #include <openssl/rsa.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "libopensc/pkcs15.h" #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "util.h" static const char *app_name = "cryptoflex-tool"; static char * opt_reader = NULL; static int opt_wait = 0; static int opt_key_num = 1, opt_pin_num = -1; static int verbose = 0; static int opt_exponent = 3; static int opt_mod_length = 1024; static int opt_key_count = 1; static int opt_pin_attempts = 10; static int opt_puk_attempts = 10; static const char *opt_appdf = NULL, *opt_prkeyf = NULL, *opt_pubkeyf = NULL; static u8 *pincode = NULL; static const struct option options[] = { { "list-keys", 0, NULL, 'l' }, { "create-key-files", 1, NULL, 'c' }, { "create-pin-file", 1, NULL, 'P' }, { "generate-key", 0, NULL, 'g' }, { "read-key", 0, NULL, 'R' }, { "verify-pin", 0, NULL, 'V' }, { "key-num", 1, NULL, 'k' }, { "app-df", 1, NULL, 'a' }, { "prkey-file", 1, NULL, 'p' }, { "pubkey-file", 1, NULL, 'u' }, { "exponent", 1, NULL, 'e' }, { "modulus-length", 1, NULL, 'm' }, { "reader", 1, NULL, 'r' }, { "wait", 0, NULL, 'w' }, { "verbose", 0, NULL, 'v' }, { NULL, 0, NULL, 0 } }; static const char *option_help[] = { "Lists all keys in a public key file", "Creates new RSA key files for <arg> keys", "Creates a new CHV<arg> file", "Generates a new RSA key pair", "Reads a public key from the card", "Verifies CHV1 before issuing commands", "Selects which key number to operate on [1]", "Selects the DF to operate in", "Private key file", "Public key file", "The RSA exponent to use in key generation [3]", "Modulus length to use in key generation [1024]", "Uses reader <arg>", "Wait for card insertion", "Verbose operation. 
Use several times to enable debug output.", }; static sc_context_t *ctx = NULL; static sc_card_t *card = NULL; static char *getpin(const char *prompt) { char *buf, pass[20]; int i; printf("%s", prompt); fflush(stdout); if (fgets(pass, 20, stdin) == NULL) return NULL; for (i = 0; i < 20; i++) if (pass[i] == '\n') pass[i] = 0; if (strlen(pass) == 0) return NULL; buf = malloc(8); if (buf == NULL) return NULL; if (strlen(pass) > 8) { fprintf(stderr, "PIN code too long.\n"); free(buf); return NULL; } memset(buf, 0, 8); strlcpy(buf, pass, 8); return buf; } static int verify_pin(int pin) { char prompt[50]; int r, tries_left = -1; if (pincode == NULL) { sprintf(prompt, "Please enter CHV%d: ", pin); pincode = (u8 *) getpin(prompt); if (pincode == NULL || strlen((char *) pincode) == 0) return -1; } if (pin != 1 && pin != 2) return -3; r = sc_verify(card, SC_AC_CHV, pin, pincode, 8, &tries_left); if (r) { memset(pincode, 0, 8); free(pincode); pincode = NULL; fprintf(stderr, "PIN code verification failed: %s\n", sc_strerror(r)); return -1; } return 0; } static int select_app_df(void) { sc_path_t path; sc_file_t *file; char str[80]; int r; strcpy(str, "3F00"); if (opt_appdf != NULL) strlcat(str, opt_appdf, sizeof str); sc_format_path(str, &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select application DF: %s\n", sc_strerror(r)); return -1; } if (file->type != SC_FILE_TYPE_DF) { fprintf(stderr, "Selected application DF is not a DF.\n"); return -1; } sc_file_free(file); if (opt_pin_num >= 0) return verify_pin(opt_pin_num); else return 0; } static void invert_buf(u8 *dest, const u8 *src, size_t c) { size_t i; for (i = 0; i < c; i++) dest[i] = src[c-1-i]; } static BIGNUM * cf2bn(const u8 *buf, size_t bufsize, BIGNUM *num) { u8 tmp[512]; invert_buf(tmp, buf, bufsize); return BN_bin2bn(tmp, bufsize, num); } static int bn2cf(const BIGNUM *num, u8 *buf) { u8 tmp[512]; int r; r = BN_bn2bin(num, tmp); if (r <= 0) return r; invert_buf(buf, tmp, r); return r; } static int parse_public_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *n, *e; int base; base = (keysize - 7) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid public key.\n"); return -1; } p += 3; n = BN_new(); if (n == NULL) return -1; cf2bn(p, 2 * base, n); p += 2 * base; p += base; p += 2 * base; e = BN_new(); if (e == NULL) return -1; cf2bn(p, 4, e); if (RSA_set0_key(rsa, n, e, NULL) != 1) return -1; return 0; } static int gen_d(RSA *rsa) { BN_CTX *bnctx; BIGNUM *r0, *r1, *r2; const BIGNUM *rsa_p, *rsa_q, *rsa_n, *rsa_e, *rsa_d; BIGNUM *rsa_n_new, *rsa_e_new, *rsa_d_new; bnctx = BN_CTX_new(); if (bnctx == NULL) return -1; BN_CTX_start(bnctx); r0 = BN_CTX_get(bnctx); r1 = BN_CTX_get(bnctx); r2 = BN_CTX_get(bnctx); RSA_get0_key(rsa, &rsa_n, &rsa_e, &rsa_d); RSA_get0_factors(rsa, &rsa_p, &rsa_q); BN_sub(r1, rsa_p, BN_value_one()); BN_sub(r2, rsa_q, BN_value_one()); BN_mul(r0, r1, r2, bnctx); if ((rsa_d_new = BN_mod_inverse(NULL, rsa_e, r0, bnctx)) == NULL) { fprintf(stderr, "BN_mod_inverse() failed.\n"); return -1; } /* RSA_set0_key will free previous value, and replace with new value * Thus the need to copy the contents of rsa_n and rsa_e */ rsa_n_new = BN_dup(rsa_n); rsa_e_new = BN_dup(rsa_e); if (RSA_set0_key(rsa, rsa_n_new, rsa_e_new, rsa_d_new) != 1) return -1; BN_CTX_end(bnctx); BN_CTX_free(bnctx); return 0; } static int parse_private_key(const u8 *key, size_t keysize, RSA *rsa) { const u8 *p = key; BIGNUM *bn_p, *q, *dmp1, *dmq1, *iqmp; int 
base; base = (keysize - 3) / 5; if (base != 32 && base != 48 && base != 64 && base != 128) { fprintf(stderr, "Invalid private key.\n"); return -1; } p += 3; bn_p = BN_new(); if (bn_p == NULL) return -1; cf2bn(p, base, bn_p); p += base; q = BN_new(); if (q == NULL) return -1; cf2bn(p, base, q); p += base; iqmp = BN_new(); if (iqmp == NULL) return -1; cf2bn(p, base, iqmp); p += base; dmp1 = BN_new(); if (dmp1 == NULL) return -1; cf2bn(p, base, dmp1); p += base; dmq1 = BN_new(); if (dmq1 == NULL) return -1; cf2bn(p, base, dmq1); p += base; if (RSA_set0_factors(rsa, bn_p, q) != 1) return -1; if (RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp) != 1) return -1; if (gen_d(rsa)) return -1; return 0; } static int read_public_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_public_key(p, keysize, rsa); } static int read_private_key(RSA *rsa) { int r; sc_path_t path; sc_file_t *file; const sc_acl_entry_t *e; u8 buf[2048], *p = buf; size_t bufsize, keysize; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, &file); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } e = sc_file_get_acl_entry(file, SC_AC_OP_READ); if (e == NULL || e->method == SC_AC_NEVER) return 10; bufsize = file->size; sc_file_free(file); r = sc_read_binary(card, 0, buf, bufsize, 0); if (r < 0) { fprintf(stderr, "Unable to read private key file: %s\n", sc_strerror(r)); return 2; } bufsize = r; do { if (bufsize < 4) return 3; keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; if (keysize < 3) return 3; if (p[2] == opt_key_num) break; p += keysize; bufsize -= keysize; } while (1); if (keysize == 0) { printf("Key number %d not found.\n", opt_key_num); return 2; } return parse_private_key(p, keysize, rsa); } static int read_key(void) { RSA *rsa = RSA_new(); u8 buf[1024], *p = buf; u8 b64buf[2048]; int r; if (rsa == NULL) return -1; r = read_public_key(rsa); if (r) return r; r = i2d_RSA_PUBKEY(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding public key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN PUBLIC KEY-----\n%s-----END PUBLIC KEY-----\n", b64buf); r = read_private_key(rsa); if (r == 10) return 0; else if (r) return r; p = buf; r = i2d_RSAPrivateKey(rsa, &p); if (r <= 0) { fprintf(stderr, "Error encoding private key.\n"); return -1; } r = sc_base64_encode(buf, r, b64buf, sizeof(b64buf), 64); if (r < 0) { fprintf(stderr, "Error in Base64 encoding: %s\n", sc_strerror(r)); return -1; } printf("-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n", b64buf); return 0; } static int list_keys(void) { int r, idx = 0; sc_path_t path; u8 buf[2048], *p = 
buf; size_t keysize, i; int mod_lens[] = { 512, 768, 1024, 2048 }; size_t sizes[] = { 167, 247, 327, 647 }; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } do { int mod_len = -1; r = sc_read_binary(card, idx, buf, 3, 0); if (r < 0) { fprintf(stderr, "Unable to read public key file: %s\n", sc_strerror(r)); return 2; } keysize = (p[0] << 8) | p[1]; if (keysize == 0) break; idx += keysize; for (i = 0; i < sizeof(sizes)/sizeof(sizes[ 0]); i++) if (sizes[i] == keysize) mod_len = mod_lens[i]; if (mod_len < 0) printf("Key %d -- unknown modulus length\n", p[2] & 0x0F); else printf("Key %d -- Modulus length %d\n", p[2] & 0x0F, mod_len); } while (1); return 0; } static int generate_key(void) { sc_apdu_t apdu; u8 sbuf[4]; u8 p2; int r; switch (opt_mod_length) { case 512: p2 = 0x40; break; case 768: p2 = 0x60; break; case 1024: p2 = 0x80; break; case 2048: p2 = 0x00; break; default: fprintf(stderr, "Invalid modulus length.\n"); return 2; } sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, (u8) opt_key_num-1, p2); apdu.cla = 0xF0; apdu.lc = 4; apdu.datalen = 4; apdu.data = sbuf; sbuf[0] = opt_exponent & 0xFF; sbuf[1] = (opt_exponent >> 8) & 0xFF; sbuf[2] = (opt_exponent >> 16) & 0xFF; sbuf[3] = (opt_exponent >> 24) & 0xFF; r = select_app_df(); if (r) return 1; if (verbose) printf("Generating key...\n"); r = sc_transmit_apdu(card, &apdu); if (r) { fprintf(stderr, "APDU transmit failed: %s\n", sc_strerror(r)); if (r == SC_ERROR_TRANSMIT_FAILED) fprintf(stderr, "Reader has timed out. It is still possible that the key generation has\n" "succeeded.\n"); return 1; } if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { printf("Key generation successful.\n"); return 0; } if (apdu.sw1 == 0x69 && apdu.sw2 == 0x82) fprintf(stderr, "CHV1 not verified or invalid exponent value.\n"); else fprintf(stderr, "Card returned SW1=%02X, SW2=%02X.\n", apdu.sw1, apdu.sw2); return 1; } static int create_key_files(void) { sc_file_t *file; int mod_lens[] = { 512, 768, 1024, 2048 }; int sizes[] = { 163, 243, 323, 643 }; int size = -1; int r; size_t i; for (i = 0; i < sizeof(mod_lens) / sizeof(int); i++) if (mod_lens[i] == opt_mod_length) { size = sizes[i]; break; } if (size == -1) { fprintf(stderr, "Invalid modulus length.\n"); return 1; } if (verbose) printf("Creating key files for %d keys.\n", opt_key_count); file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x0012; file->size = opt_key_count * size + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create private key file: %s\n", sc_strerror(r)); return 1; } file = sc_file_new(); if (!file) { fprintf(stderr, "out of memory.\n"); return 1; } file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; file->id = 0x1012; file->size = opt_key_count * (size + 4) + 3; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 1); 
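/* Note the asymmetry with the private key EF (0012) created above, whose READ ACL is SC_AC_NEVER: this public key EF (1012) is world-readable (SC_AC_NONE), while all modifying operations on both files are gated on CHV1. */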
sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_CHV, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_CHV, 1); if (select_app_df()) { sc_file_free(file); return 1; } r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "Unable to create public key file: %s\n", sc_strerror(r)); return 1; } if (verbose) printf("Key files generated successfully.\n"); return 0; } static int read_rsa_privkey(RSA **rsa_out) { RSA *rsa = NULL; BIO *in = NULL; int r; in = BIO_new(BIO_s_file()); if (opt_prkeyf == NULL) { fprintf(stderr, "Private key file must be set.\n"); return 2; } r = BIO_read_filename(in, opt_prkeyf); if (r <= 0) { perror(opt_prkeyf); return 2; } rsa = PEM_read_bio_RSAPrivateKey(in, NULL, NULL, NULL); if (rsa == NULL) { fprintf(stderr, "Unable to load private key.\n"); return 2; } BIO_free(in); *rsa_out = rsa; return 0; } static int encode_private_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_p, *rsa_q, *rsa_dmp1, *rsa_dmq1, *rsa_iqmp; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 3) >> 8; *p++ = (5 * base + 3) & 0xFF; *p++ = opt_key_num; RSA_get0_factors(rsa, &rsa_p, &rsa_q); r = bn2cf(rsa_p, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_q, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; RSA_get0_crt_params(rsa, &rsa_dmp1, &rsa_dmq1, &rsa_iqmp); r = bn2cf(rsa_iqmp, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmp1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; r = bn2cf(rsa_dmq1, bnbuf); if (r != base) { fprintf(stderr, "Invalid private key.\n"); return 2; } memcpy(p, bnbuf, base); p += base; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize) { u8 buf[1024], *p = buf; u8 bnbuf[256]; int base = 0; int r; const BIGNUM *rsa_n, *rsa_e; switch (RSA_bits(rsa)) { case 512: base = 32; break; case 768: base = 48; break; case 1024: base = 64; break; case 2048: base = 128; break; } if (base == 0) { fprintf(stderr, "Key length invalid.\n"); return 2; } *p++ = (5 * base + 7) >> 8; *p++ = (5 * base + 7) & 0xFF; *p++ = opt_key_num; RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL); r = bn2cf(rsa_n, bnbuf); if (r != 2*base) { fprintf(stderr, "Invalid public key.\n"); return 2; } memcpy(p, bnbuf, 2*base); p += 2*base; memset(p, 0, base); p += base; memset(bnbuf, 0, 2*base); memcpy(p, bnbuf, 2*base); p += 2*base; r = bn2cf(rsa_e, bnbuf); memcpy(p, bnbuf, 4); p += 4; memcpy(key, buf, p - buf); *keysize = p - buf; return 0; } static int update_public_key(const u8 *key, size_t keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I1012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select public key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write public key: %s\n", sc_strerror(r)); return 2; } return 0; } static int update_private_key(const u8 *key, size_t 
keysize) { int r, idx = 0; sc_path_t path; r = select_app_df(); if (r) return 1; sc_format_path("I0012", &path); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select private key file: %s\n", sc_strerror(r)); return 2; } idx = keysize * (opt_key_num-1); r = sc_update_binary(card, idx, key, keysize, 0); if (r < 0) { fprintf(stderr, "Unable to write private key: %s\n", sc_strerror(r)); return 2; } return 0; } static int store_key(void) { u8 prv[1024], pub[1024]; size_t prvsize, pubsize; int r; RSA *rsa; r = read_rsa_privkey(&rsa); if (r) return r; r = encode_private_key(rsa, prv, &prvsize); if (r) return r; r = encode_public_key(rsa, pub, &pubsize); if (r) return r; if (verbose) printf("Storing private key...\n"); r = select_app_df(); if (r) return r; r = update_private_key(prv, prvsize); if (r) return r; if (verbose) printf("Storing public key...\n"); r = select_app_df(); if (r) return r; r = update_public_key(pub, pubsize); if (r) return r; return 0; } static int create_pin_file(const sc_path_t *inpath, int chv, const char *key_id) { char prompt[40], *pin, *puk; char buf[30], *p = buf; sc_path_t file_id, path; sc_file_t *file; size_t len; int r; file_id = *inpath; if (file_id.len < 2) return -1; if (chv == 1) sc_format_path("I0000", &file_id); else if (chv == 2) sc_format_path("I0100", &file_id); else return -1; r = sc_select_file(card, inpath, NULL); if (r) return -1; r = sc_select_file(card, &file_id, NULL); if (r == 0) return 0; sprintf(prompt, "Please enter CHV%d%s: ", chv, key_id); pin = getpin(prompt); if (pin == NULL) return -1; sprintf(prompt, "Please enter PUK for CHV%d%s: ", chv, key_id); puk = getpin(prompt); if (puk == NULL) { free(pin); return -1; } memset(p, 0xFF, 3); p += 3; memcpy(p, pin, 8); p += 8; *p++ = opt_pin_attempts; *p++ = opt_pin_attempts; memcpy(p, puk, 8); p += 8; *p++ = opt_puk_attempts; *p++ = opt_puk_attempts; len = p - buf; free(pin); free(puk); file = sc_file_new(); file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; sc_file_add_acl_entry(file, SC_AC_OP_READ, SC_AC_NEVER, SC_AC_KEY_REF_NONE); if (inpath->len == 2 && inpath->value[0] == 0x3F && inpath->value[1] == 0x00) sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_AUT, 1); else sc_file_add_acl_entry(file, SC_AC_OP_UPDATE, SC_AC_CHV, 2); sc_file_add_acl_entry(file, SC_AC_OP_INVALIDATE, SC_AC_AUT, 1); sc_file_add_acl_entry(file, SC_AC_OP_REHABILITATE, SC_AC_AUT, 1); file->size = len; file->id = (file_id.value[0] << 8) | file_id.value[1]; r = sc_create_file(card, file); sc_file_free(file); if (r) { fprintf(stderr, "PIN file creation failed: %s\n", sc_strerror(r)); return r; } path = *inpath; sc_append_path(&path, &file_id); r = sc_select_file(card, &path, NULL); if (r) { fprintf(stderr, "Unable to select created PIN file: %s\n", sc_strerror(r)); return r; } r = sc_update_binary(card, 0, (const u8 *) buf, len, 0); if (r < 0) { fprintf(stderr, "Unable to update created PIN file: %s\n", sc_strerror(r)); return r; } return 0; } static int create_pin(void) { sc_path_t path; char buf[80]; if (opt_pin_num != 1 && opt_pin_num != 2) { fprintf(stderr, "Invalid PIN number. 
Possible values: 1, 2.\n"); return 2; } strcpy(buf, "3F00"); if (opt_appdf != NULL) strlcat(buf, opt_appdf, sizeof buf); sc_format_path(buf, &path); return create_pin_file(&path, opt_pin_num, ""); } int main(int argc, char *argv[]) { int err = 0, r, c, long_optind = 0; int action_count = 0; int do_read_key = 0; int do_generate_key = 0; int do_create_key_files = 0; int do_list_keys = 0; int do_store_key = 0; int do_create_pin_file = 0; sc_context_param_t ctx_param; while (1) { c = getopt_long(argc, argv, "P:Vslgc:Rk:r:p:u:e:m:vwa:", options, &long_optind); if (c == -1) break; if (c == '?') util_print_usage_and_die(app_name, options, option_help, NULL); switch (c) { case 'l': do_list_keys = 1; action_count++; break; case 'P': do_create_pin_file = 1; opt_pin_num = atoi(optarg); action_count++; break; case 'R': do_read_key = 1; action_count++; break; case 'g': do_generate_key = 1; action_count++; break; case 'c': do_create_key_files = 1; opt_key_count = atoi(optarg); action_count++; break; case 's': do_store_key = 1; action_count++; break; case 'k': opt_key_num = atoi(optarg); if (opt_key_num < 1 || opt_key_num > 15) { fprintf(stderr, "Key number invalid.\n"); exit(2); } break; case 'V': opt_pin_num = 1; break; case 'e': opt_exponent = atoi(optarg); break; case 'm': opt_mod_length = atoi(optarg); break; case 'p': opt_prkeyf = optarg; break; case 'u': opt_pubkeyf = optarg; break; case 'r': opt_reader = optarg; break; case 'v': verbose++; break; case 'w': opt_wait = 1; break; case 'a': opt_appdf = optarg; break; } } if (action_count == 0) util_print_usage_and_die(app_name, options, option_help, NULL); memset(&ctx_param, 0, sizeof(ctx_param)); ctx_param.ver = 0; ctx_param.app_name = app_name; r = sc_context_create(&ctx, &ctx_param); if (r) { fprintf(stderr, "Failed to establish context: %s\n", sc_strerror(r)); return 1; } if (verbose > 1) { ctx->debug = verbose; sc_ctx_log_to_file(ctx, "stderr"); } err = util_connect_card(ctx, &card, opt_reader, opt_wait, verbose); printf("Using card driver: %s\n", card->driver->name); if (do_create_pin_file) { if ((err = create_pin()) != 0) goto end; action_count--; } if (do_create_key_files) { if ((err = create_key_files()) != 0) goto end; action_count--; } if (do_generate_key) { if ((err = generate_key()) != 0) goto end; action_count--; } if (do_store_key) { if ((err = store_key()) != 0) goto end; action_count--; } if (do_list_keys) { if ((err = list_keys()) != 0) goto end; action_count--; } if (do_read_key) { if ((err = read_key()) != 0) goto end; action_count--; } if (pincode != NULL) { memset(pincode, 0, 8); free(pincode); } end: if (card) { sc_unlock(card); sc_disconnect_card(card); } if (ctx) sc_release_context(ctx); return err; }
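/*
 * Illustration only; this is not part of cryptoflex-tool.c and not an
 * OpenSC API. The Cryptoflex key files store each RSA component with the
 * least significant byte first, which is why cf2bn() and bn2cf() above
 * reverse buffers around OpenSSL's big-endian BN_bin2bn()/BN_bn2bin().
 * A minimal standalone sketch of that reversal (demo_bn2cf is a made-up
 * name):
 */
#include <stdio.h>
#include <openssl/bn.h>

static int demo_bn2cf(void)
{
	BIGNUM *bn = NULL;
	unsigned char be[64], le[64];
	int len, i;

	if (!BN_hex2bn(&bn, "12345678"))	/* sample value */
		return 1;
	len = BN_bn2bin(bn, be);		/* big-endian: 12 34 56 78 */
	for (i = 0; i < len; i++)		/* reverse, as invert_buf() does */
		le[i] = be[len - 1 - i];
	for (i = 0; i < len; i++)
		printf("%02X ", le[i]);		/* card order: 78 56 34 12 */
	printf("\n");
	BN_free(bn);
	return 0;
}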
./CrossVul/dataset_final_sorted/CWE-119/c/bad_342_8
crossvul-cpp_data_good_5589_1
/* * linux/kernel/printk.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Modified to make sys_syslog() more flexible: added commands to * return the last 4k of kernel messages, regardless of whether * they've been read or not. Added option to suppress kernel printk's * to the console. Added hook for sending the console messages * elsewhere, in preparation for a serial line console (someday). * Ted Ts'o, 2/11/93. * Modified for sysctl support, 1/8/97, Chris Horn. * Fixed SMP synchronization, 08/08/99, Manfred Spraul * manfred@colorfullife.com * Rewrote bits to get rid of console_lock * 01Mar01 Andrew Morton */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/console.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/nmi.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> /* For in_interrupt() */ #include <linux/delay.h> #include <linux/smp.h> #include <linux/security.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/syscalls.h> #include <linux/kexec.h> #include <linux/kdb.h> #include <linux/ratelimit.h> #include <linux/kmsg_dump.h> #include <linux/syslog.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/rculist.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include <trace/events/printk.h> /* * Architectures can override it: */ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) { } #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) /* printk's without a loglevel use this.. */ #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL /* We show everything that is MORE important than this.. */ #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ DECLARE_WAIT_QUEUE_HEAD(log_wait); int console_printk[4] = { DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ }; /* * Low level drivers may need that to know if they can schedule in * their unblank() callback or not. So let's export it. */ int oops_in_progress; EXPORT_SYMBOL(oops_in_progress); /* * console_sem protects the console_drivers list, and also * provides serialisation for access to the entire console * driver system. */ static DEFINE_SEMAPHORE(console_sem); struct console *console_drivers; EXPORT_SYMBOL_GPL(console_drivers); /* * This is used for debugging the mess that is the VT code by * keeping track if we have the console semaphore held. It's * definitely not the perfect debug tool (we don't know if _WE_ * hold it and are racing, but it helps tracking those weird code * paths in the console code where we end up in places I want * locked without the console semaphore held */ static int console_locked, console_suspended; /* * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars * It is also used in interesting ways to provide interlocking in * console_unlock();.
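 * (For illustration: all three indices are free-running unsigned counters.
 * LOG_BUF() below masks them with log_buf_len - 1, which is valid because
 * log_buf_len is always a power of two: __LOG_BUF_LEN is
 * 1 << CONFIG_LOG_BUF_SHIFT, and a cmdline override is passed through
 * roundup_pow_of_two(). With a 16-byte buffer, index 19 lands in slot
 * 19 & 15 == 3, and differences such as log_end - log_start stay correct
 * across wraparound by unsigned modular arithmetic.)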
*/ static DEFINE_RAW_SPINLOCK(logbuf_lock); #define LOG_BUF_MASK (log_buf_len-1) #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) /* * The indices into log_buf are not constrained to log_buf_len - they * must be masked before subscripting */ static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */ static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */ static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */ /* * If exclusive_console is non-NULL then only this console is to be printed to. */ static struct console *exclusive_console; /* * Array of consoles built from command line options (console=) */ struct console_cmdline { char name[8]; /* Name of the driver */ int index; /* Minor dev. to use */ char *options; /* Options for the driver */ #ifdef CONFIG_A11Y_BRAILLE_CONSOLE char *brl_options; /* Options for braille driver */ #endif }; #define MAX_CMDLINECONSOLES 8 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; static int selected_console = -1; static int preferred_console = -1; int console_set_on_cmdline; EXPORT_SYMBOL(console_set_on_cmdline); /* Flag: console code may call schedule() */ static int console_may_schedule; #ifdef CONFIG_PRINTK static char __log_buf[__LOG_BUF_LEN]; static char *log_buf = __log_buf; static int log_buf_len = __LOG_BUF_LEN; static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ static int saved_console_loglevel = -1; #ifdef CONFIG_KEXEC /* * This appends the listed symbols to /proc/vmcoreinfo * * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile to * obtain access to symbols that are otherwise very difficult to locate. These * symbols are specifically used so that utilities can access and extract the * dmesg log from a vmcore file after a crash.
*/ void log_buf_kexec_setup(void) { VMCOREINFO_SYMBOL(log_buf); VMCOREINFO_SYMBOL(log_end); VMCOREINFO_SYMBOL(log_buf_len); VMCOREINFO_SYMBOL(logged_chars); } #endif /* requested log_buf_len from kernel cmdline */ static unsigned long __initdata new_log_buf_len; /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { unsigned size = memparse(str, &str); if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) new_log_buf_len = size; return 0; } early_param("log_buf_len", log_buf_len_setup); void __init setup_log_buf(int early) { unsigned long flags; unsigned start, dest_idx, offset; char *new_log_buf; int free; if (!new_log_buf_len) return; if (early) { unsigned long mem; mem = memblock_alloc(new_log_buf_len, PAGE_SIZE); if (!mem) return; new_log_buf = __va(mem); } else { new_log_buf = alloc_bootmem_nopanic(new_log_buf_len); } if (unlikely(!new_log_buf)) { pr_err("log_buf_len: %ld bytes not available\n", new_log_buf_len); return; } raw_spin_lock_irqsave(&logbuf_lock, flags); log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; free = __LOG_BUF_LEN - log_end; offset = start = min(con_start, log_start); dest_idx = 0; while (start != log_end) { unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1); log_buf[dest_idx] = __log_buf[log_idx_mask]; start++; dest_idx++; } log_start -= offset; con_start -= offset; log_end -= offset; raw_spin_unlock_irqrestore(&logbuf_lock, flags); pr_info("log_buf_len: %d\n", log_buf_len); pr_info("early log buf free: %d(%d%%)\n", free, (free * 100) / __LOG_BUF_LEN); } #ifdef CONFIG_BOOT_PRINTK_DELAY static int boot_delay; /* msecs delay after each printk during bootup */ static unsigned long long loops_per_msec; /* based on boot_delay */ static int __init boot_delay_setup(char *str) { unsigned long lpj; lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */ loops_per_msec = (unsigned long long)lpj / 1000 * HZ; get_option(&str, &boot_delay); if (boot_delay > 10 * 1000) boot_delay = 0; pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " "HZ: %d, loops_per_msec: %llu\n", boot_delay, preset_lpj, lpj, HZ, loops_per_msec); return 1; } __setup("boot_delay=", boot_delay_setup); static void boot_delay_msec(void) { unsigned long long k; unsigned long timeout; if (boot_delay == 0 || system_state != SYSTEM_BOOTING) return; k = (unsigned long long)loops_per_msec * boot_delay; timeout = jiffies + msecs_to_jiffies(boot_delay); while (k) { k--; cpu_relax(); /* * use (volatile) jiffies to prevent * compiler reduction; loop termination via jiffies * is secondary and may or may not happen. */ if (time_after(jiffies, timeout)) break; touch_nmi_watchdog(); } } #else static inline void boot_delay_msec(void) { } #endif #ifdef CONFIG_SECURITY_DMESG_RESTRICT int dmesg_restrict = 1; #else int dmesg_restrict; #endif static int syslog_action_restricted(int type) { if (dmesg_restrict) return 1; /* Unless restricted, we allow "read all" and "get buffer size" for everybody */ return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; } static int check_syslog_permissions(int type, bool from_file) { /* * If this is from /proc/kmsg and we've already opened it, then we've * already done the capabilities checks at open time. 
*/ if (from_file && type != SYSLOG_ACTION_OPEN) return 0; if (syslog_action_restricted(type)) { if (capable(CAP_SYSLOG)) return 0; /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ if (capable(CAP_SYS_ADMIN)) { printk_once(KERN_WARNING "%s (%d): " "Attempt to access syslog with CAP_SYS_ADMIN " "but no CAP_SYSLOG (deprecated).\n", current->comm, task_pid_nr(current)); return 0; } return -EPERM; } return 0; } int do_syslog(int type, char __user *buf, int len, bool from_file) { unsigned i, j, limit, count; int do_clear = 0; char c; int error; error = check_syslog_permissions(type, from_file); if (error) goto out; error = security_syslog(type); if (error) return error; switch (type) { case SYSLOG_ACTION_CLOSE: /* Close log */ break; case SYSLOG_ACTION_OPEN: /* Open log */ break; case SYSLOG_ACTION_READ: /* Read from log */ error = -EINVAL; if (!buf || len < 0) goto out; error = 0; if (!len) goto out; if (!access_ok(VERIFY_WRITE, buf, len)) { error = -EFAULT; goto out; } error = wait_event_interruptible(log_wait, (log_start - log_end)); if (error) goto out; i = 0; raw_spin_lock_irq(&logbuf_lock); while (!error && (log_start != log_end) && i < len) { c = LOG_BUF(log_start); log_start++; raw_spin_unlock_irq(&logbuf_lock); error = __put_user(c,buf); buf++; i++; cond_resched(); raw_spin_lock_irq(&logbuf_lock); } raw_spin_unlock_irq(&logbuf_lock); if (!error) error = i; break; /* Read/clear last kernel messages */ case SYSLOG_ACTION_READ_CLEAR: do_clear = 1; /* FALL THRU */ /* Read last kernel messages */ case SYSLOG_ACTION_READ_ALL: error = -EINVAL; if (!buf || len < 0) goto out; error = 0; if (!len) goto out; if (!access_ok(VERIFY_WRITE, buf, len)) { error = -EFAULT; goto out; } count = len; if (count > log_buf_len) count = log_buf_len; raw_spin_lock_irq(&logbuf_lock); if (count > logged_chars) count = logged_chars; if (do_clear) logged_chars = 0; limit = log_end; /* * __put_user() could sleep, and while we sleep * printk() could overwrite the messages * we try to copy to user space. Therefore * the messages are copied in reverse. <manfreds> */ for (i = 0; i < count && !error; i++) { j = limit-1-i; if (j + log_buf_len < log_end) break; c = LOG_BUF(j); raw_spin_unlock_irq(&logbuf_lock); error = __put_user(c,&buf[count-1-i]); cond_resched(); raw_spin_lock_irq(&logbuf_lock); } raw_spin_unlock_irq(&logbuf_lock); if (error) break; error = i; if (i != count) { int offset = count-error; /* buffer overflow during copy, correct user buffer. 
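 * (Expanding on that: the reverse copy above fills buf[count-1] first. If a
 * concurrent printk() overwrote a character before we could read it, the
 * 'j + log_buf_len < log_end' test broke out of the loop early, so only
 * 'error' valid characters sit at the tail of the user buffer. The loop
 * below shifts that surviving tail down so the result starts at buf[0].)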
*/ for (i = 0; i < error; i++) { if (__get_user(c,&buf[i+offset]) || __put_user(c,&buf[i])) { error = -EFAULT; break; } cond_resched(); } } break; /* Clear ring buffer */ case SYSLOG_ACTION_CLEAR: logged_chars = 0; break; /* Disable logging to console */ case SYSLOG_ACTION_CONSOLE_OFF: if (saved_console_loglevel == -1) saved_console_loglevel = console_loglevel; console_loglevel = minimum_console_loglevel; break; /* Enable logging to console */ case SYSLOG_ACTION_CONSOLE_ON: if (saved_console_loglevel != -1) { console_loglevel = saved_console_loglevel; saved_console_loglevel = -1; } break; /* Set level of messages printed to console */ case SYSLOG_ACTION_CONSOLE_LEVEL: error = -EINVAL; if (len < 1 || len > 8) goto out; if (len < minimum_console_loglevel) len = minimum_console_loglevel; console_loglevel = len; /* Implicitly re-enable logging to console */ saved_console_loglevel = -1; error = 0; break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: error = log_end - log_start; break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: error = log_buf_len; break; default: error = -EINVAL; break; } out: return error; } SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) { return do_syslog(type, buf, len, SYSLOG_FROM_CALL); } #ifdef CONFIG_KGDB_KDB /* kdb dmesg command needs access to the syslog buffer. do_syslog() * uses locks so it cannot be used during debugging. Just tell kdb * where the start and end of the physical and logical logs are. This * is equivalent to do_syslog(3). */ void kdb_syslog_data(char *syslog_data[4]) { syslog_data[0] = log_buf; syslog_data[1] = log_buf + log_buf_len; syslog_data[2] = log_buf + log_end - (logged_chars < log_buf_len ? logged_chars : log_buf_len); syslog_data[3] = log_buf + log_end; } #endif /* CONFIG_KGDB_KDB */ /* * Call the console drivers on a range of log_buf */ static void __call_console_drivers(unsigned start, unsigned end) { struct console *con; for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; if ((con->flags & CON_ENABLED) && con->write && (cpu_online(smp_processor_id()) || (con->flags & CON_ANYTIME))) con->write(con, &LOG_BUF(start), end - start); } } static bool __read_mostly ignore_loglevel; static int __init ignore_loglevel_setup(char *str) { ignore_loglevel = 1; printk(KERN_INFO "debug: ignoring loglevel setting.\n"); return 0; } early_param("ignore_loglevel", ignore_loglevel_setup); module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to" "print all kernel messages to the console."); /* * Write out chars from start to end - 1 inclusive */ static void _call_console_drivers(unsigned start, unsigned end, int msg_log_level) { trace_console(&LOG_BUF(0), start, end, log_buf_len); if ((msg_log_level < console_loglevel || ignore_loglevel) && console_drivers && start != end) { if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) { /* wrapped write */ __call_console_drivers(start & LOG_BUF_MASK, log_buf_len); __call_console_drivers(0, end & LOG_BUF_MASK); } else { __call_console_drivers(start, end); } } } /* * Parse the syslog header <[0-9]*>. The decimal value represents 32bit, the * lower 3 bit are the log level, the rest are the log facility. In case * userspace passes usual userspace syslog messages to /dev/kmsg or * /dev/ttyprintk, the log prefix might contain the facility. Printk needs * to extract the correct log level for in-kernel processing, and not mangle * the original value. 
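 * (Worked example: for the header "<13>" the multi-digit branch parses 13,
 * keeps 13 & 7 == 5 as the level, silently drops the facility bits
 * 13 >> 3 == 1, and returns the prefix length 4.)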
* * If a prefix is found, the length of the prefix is returned. If 'level' is * passed, it will be filled in with the log level without a possible facility * value. If 'special' is passed, the special printk prefix chars are accepted * and returned. If no valid header is found, 0 is returned and the passed * variables are not touched. */ static size_t log_prefix(const char *p, unsigned int *level, char *special) { unsigned int lev = 0; char sp = '\0'; size_t len; if (p[0] != '<' || !p[1]) return 0; if (p[2] == '>') { /* usual single digit level number or special char */ switch (p[1]) { case '0' ... '7': lev = p[1] - '0'; break; case 'c': /* KERN_CONT */ case 'd': /* KERN_DEFAULT */ sp = p[1]; break; default: return 0; } len = 3; } else { /* multi digit including the level and facility number */ char *endp = NULL; lev = (simple_strtoul(&p[1], &endp, 10) & 7); if (endp == NULL || endp[0] != '>') return 0; len = (endp + 1) - p; } /* do not accept special char if not asked for */ if (sp && !special) return 0; if (special) { *special = sp; /* return special char, do not touch level */ if (sp) return len; } if (level) *level = lev; return len; } /* * Call the console drivers, asking them to write out * log_buf[start] to log_buf[end - 1]. * The console_lock must be held. */ static void call_console_drivers(unsigned start, unsigned end) { unsigned cur_index, start_print; static int msg_level = -1; BUG_ON(((int)(start - end)) > 0); cur_index = start; start_print = start; while (cur_index != end) { if (msg_level < 0 && ((end - cur_index) > 2)) { /* * prepare buf_prefix, as a contiguous array, * to be processed by log_prefix function */ char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1]; unsigned i; for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) { buf_prefix[i] = LOG_BUF(cur_index + i); } buf_prefix[i] = '\0'; /* force '\0' as last string character */ /* strip log prefix */ cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL); start_print = cur_index; } while (cur_index != end) { char c = LOG_BUF(cur_index); cur_index++; if (c == '\n') { if (msg_level < 0) { /* * printk() has already given us loglevel tags in * the buffer. This code is here in case the * log buffer has wrapped right round and scribbled * on those tags */ msg_level = default_message_loglevel; } _call_console_drivers(start_print, cur_index, msg_level); msg_level = -1; start_print = cur_index; break; } } } _call_console_drivers(start_print, end, msg_level); } static void emit_log_char(char c) { LOG_BUF(log_end) = c; log_end++; if (log_end - log_start > log_buf_len) log_start = log_end - log_buf_len; if (log_end - con_start > log_buf_len) con_start = log_end - log_buf_len; if (logged_chars < log_buf_len) logged_chars++; } /* * Zap console related locks when oopsing. Only zap at most once * every 10 seconds, to leave time for slow consoles to print a * full oops. 
*/ static void zap_locks(void) { static unsigned long oops_timestamp; if (time_after_eq(jiffies, oops_timestamp) && !time_after(jiffies, oops_timestamp + 30 * HZ)) return; oops_timestamp = jiffies; debug_locks_off(); /* If a crash is occurring, make sure we can't deadlock */ raw_spin_lock_init(&logbuf_lock); /* And make sure that we print immediately */ sema_init(&console_sem, 1); } #if defined(CONFIG_PRINTK_TIME) static bool printk_time = 1; #else static bool printk_time = 0; #endif module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); static bool always_kmsg_dump; module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); /* Check if we have any console registered that can be called early in boot. */ static int have_callable_console(void) { struct console *con; for_each_console(con) if (con->flags & CON_ANYTIME) return 1; return 0; } /** * printk - print a kernel message * @fmt: format string * * This is printk(). It can be called from any context. We want it to work. * * We try to grab the console_lock. If we succeed, it's easy - we log the output and * call the console drivers. If we fail to get the semaphore we place the output * into the log buffer and return. The current holder of the console_sem will * notice the new output in console_unlock(); and will send it to the * consoles before releasing the lock. * * One effect of this deferred printing is that code which calls printk() and * then changes console_loglevel may break. This is because console_loglevel * is inspected when the actual printing occurs. * * See also: * printf(3) * * See the vsnprintf() documentation for format string extensions over C99. */ asmlinkage int printk(const char *fmt, ...) { va_list args; int r; #ifdef CONFIG_KGDB_KDB if (unlikely(kdb_trap_printk)) { va_start(args, fmt); r = vkdb_printf(fmt, args); va_end(args); return r; } #endif va_start(args, fmt); r = vprintk(fmt, args); va_end(args); return r; } /* cpu currently holding logbuf_lock */ static volatile unsigned int printk_cpu = UINT_MAX; /* * Can we actually use the console at this time on this cpu? * * Console drivers may assume that per-cpu resources have * been allocated. So unless they're explicitly marked as * being able to cope (CON_ANYTIME) don't call them until * this CPU is officially up. */ static inline int can_use_console(unsigned int cpu) { return cpu_online(cpu) || have_callable_console(); } /* * Try to get console ownership to actually show the kernel * messages from a 'printk'. Return true (and with the * console_lock held, and 'console_locked' set) if it * is successful, false otherwise. * * This gets called with the 'logbuf_lock' spinlock held and * interrupts disabled. It should return with 'lockbuf_lock' * released but interrupts still disabled. */ static int console_trylock_for_printk(unsigned int cpu) __releases(&logbuf_lock) { int retval = 0, wake = 0; if (console_trylock()) { retval = 1; /* * If we can't use the console, we need to release * the console semaphore by hand to avoid flushing * the buffer. We need to hold the console semaphore * in order to do this test safely. 
*/ if (!can_use_console(cpu)) { console_locked = 0; wake = 1; retval = 0; } } printk_cpu = UINT_MAX; if (wake) up(&console_sem); raw_spin_unlock(&logbuf_lock); return retval; } static const char recursion_bug_msg [] = KERN_CRIT "BUG: recent printk recursion!\n"; static int recursion_bug; static int new_text_line = 1; static char printk_buf[1024]; int printk_delay_msec __read_mostly; static inline void printk_delay(void) { if (unlikely(printk_delay_msec)) { int m = printk_delay_msec; while (m--) { mdelay(1); touch_nmi_watchdog(); } } } asmlinkage int vprintk(const char *fmt, va_list args) { int printed_len = 0; int current_log_level = default_message_loglevel; unsigned long flags; int this_cpu; char *p; size_t plen; char special; boot_delay_msec(); printk_delay(); /* This stops the holder of console_sem just where we want him */ local_irq_save(flags); this_cpu = smp_processor_id(); /* * Ouch, printk recursed into itself! */ if (unlikely(printk_cpu == this_cpu)) { /* * If a crash is occurring during printk() on this CPU, * then try to get the crash message out but make sure * we can't deadlock. Otherwise just return to avoid the * recursion and return - but flag the recursion so that * it can be printed at the next appropriate moment: */ if (!oops_in_progress && !lockdep_recursing(current)) { recursion_bug = 1; goto out_restore_irqs; } zap_locks(); } lockdep_off(); raw_spin_lock(&logbuf_lock); printk_cpu = this_cpu; if (recursion_bug) { recursion_bug = 0; strcpy(printk_buf, recursion_bug_msg); printed_len = strlen(recursion_bug_msg); } /* Emit the output into the temporary buffer */ printed_len += vscnprintf(printk_buf + printed_len, sizeof(printk_buf) - printed_len, fmt, args); p = printk_buf; /* Read log level and handle special printk prefix */ plen = log_prefix(p, &current_log_level, &special); if (plen) { p += plen; switch (special) { case 'c': /* Strip <c> KERN_CONT, continue line */ plen = 0; break; case 'd': /* Strip <d> KERN_DEFAULT, start new line */ plen = 0; default: if (!new_text_line) { emit_log_char('\n'); new_text_line = 1; } } } /* * Copy the output into log_buf. If the caller didn't provide * the appropriate log prefix, we insert them here */ for (; *p; p++) { if (new_text_line) { new_text_line = 0; if (plen) { /* Copy original log prefix */ int i; for (i = 0; i < plen; i++) emit_log_char(printk_buf[i]); printed_len += plen; } else { /* Add log prefix */ emit_log_char('<'); emit_log_char(current_log_level + '0'); emit_log_char('>'); printed_len += 3; } if (printk_time) { /* Add the current time stamp */ char tbuf[50], *tp; unsigned tlen; unsigned long long t; unsigned long nanosec_rem; t = cpu_clock(printk_cpu); nanosec_rem = do_div(t, 1000000000); tlen = sprintf(tbuf, "[%5lu.%06lu] ", (unsigned long) t, nanosec_rem / 1000); for (tp = tbuf; tp < tbuf + tlen; tp++) emit_log_char(*tp); printed_len += tlen; } if (!*p) break; } emit_log_char(*p); if (*p == '\n') new_text_line = 1; } /* * Try to acquire and then immediately release the * console semaphore. The release will do all the * actual magic (print out buffers, wake up klogd, * etc). * * The console_trylock_for_printk() function * will release 'logbuf_lock' regardless of whether it * actually gets the semaphore or not. 
*/ if (console_trylock_for_printk(this_cpu)) console_unlock(); lockdep_on(); out_restore_irqs: local_irq_restore(flags); return printed_len; } EXPORT_SYMBOL(printk); EXPORT_SYMBOL(vprintk); #else static void call_console_drivers(unsigned start, unsigned end) { } #endif static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { struct console_cmdline *c; int i; /* * See if this tty is not yet registered, and * if we have a slot free. */ for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) if (strcmp(console_cmdline[i].name, name) == 0 && console_cmdline[i].index == idx) { if (!brl_options) selected_console = i; return 0; } if (i == MAX_CMDLINECONSOLES) return -E2BIG; if (!brl_options) selected_console = i; c = &console_cmdline[i]; strlcpy(c->name, name, sizeof(c->name)); c->options = options; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE c->brl_options = brl_options; #endif c->index = idx; return 0; } /* * Set up a list of consoles. Called from init/main.c */ static int __init console_setup(char *str) { char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */ char *s, *options, *brl_options = NULL; int idx; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (!memcmp(str, "brl,", 4)) { brl_options = ""; str += 4; } else if (!memcmp(str, "brl=", 4)) { brl_options = str + 4; str = strchr(brl_options, ','); if (!str) { printk(KERN_ERR "need port name after brl=\n"); return 1; } *(str++) = 0; } #endif /* * Decode str into name, index, options. */ if (str[0] >= '0' && str[0] <= '9') { strcpy(buf, "ttyS"); strncpy(buf + 4, str, sizeof(buf) - 5); } else { strncpy(buf, str, sizeof(buf) - 1); } buf[sizeof(buf) - 1] = 0; if ((options = strchr(str, ',')) != NULL) *(options++) = 0; #ifdef __sparc__ if (!strcmp(str, "ttya")) strcpy(buf, "ttyS0"); if (!strcmp(str, "ttyb")) strcpy(buf, "ttyS1"); #endif for (s = buf; *s; s++) if ((*s >= '0' && *s <= '9') || *s == ',') break; idx = simple_strtoul(s, NULL, 10); *s = 0; __add_preferred_console(buf, idx, options, brl_options); console_set_on_cmdline = 1; return 1; } __setup("console=", console_setup); /** * add_preferred_console - add a device to the list of preferred consoles. * @name: device name * @idx: device index * @options: options for this console * * The last preferred console added will be used for kernel messages * and stdin/out/err for init. Normally this is used by console_setup * above to handle user-supplied console arguments; however it can also * be used by arch-specific code either to override the user or more * commonly to provide a default console (ie from PROM variables) when * the user has not supplied one. 
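 * (A sketch, values illustrative only: arch code could register a
 * PROM-derived serial fallback with
 *
 *	add_preferred_console("ttyS", 0, "115200n8");
 *
 * which has the same effect as the user booting with
 * console=ttyS0,115200n8.)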
*/ int add_preferred_console(char *name, int idx, char *options) { return __add_preferred_console(name, idx, options, NULL); } int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) { struct console_cmdline *c; int i; for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) if (strcmp(console_cmdline[i].name, name) == 0 && console_cmdline[i].index == idx) { c = &console_cmdline[i]; strlcpy(c->name, name_new, sizeof(c->name)); c->name[sizeof(c->name) - 1] = 0; c->options = options; c->index = idx_new; return i; } /* not found */ return -1; } bool console_suspend_enabled = 1; EXPORT_SYMBOL(console_suspend_enabled); static int __init console_suspend_disable(char *str) { console_suspend_enabled = 0; return 1; } __setup("no_console_suspend", console_suspend_disable); module_param_named(console_suspend, console_suspend_enabled, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(console_suspend, "suspend console during suspend" " and hibernate operations"); /** * suspend_console - suspend the console subsystem * * This disables printk() while we go into suspend states */ void suspend_console(void) { if (!console_suspend_enabled) return; printk("Suspending console(s) (use no_console_suspend to debug)\n"); console_lock(); console_suspended = 1; up(&console_sem); } void resume_console(void) { if (!console_suspend_enabled) return; down(&console_sem); console_suspended = 0; console_unlock(); } /** * console_cpu_notify - print deferred console messages after CPU hotplug * @self: notifier struct * @action: CPU hotplug event * @hcpu: unused * * If printk() is called from a CPU that is not online yet, the messages * will be spooled but will not show up on the console. This function is * called when a new CPU comes online (or fails to come up), and ensures * that any such output gets printed. */ static int __cpuinit console_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { switch (action) { case CPU_ONLINE: case CPU_DEAD: case CPU_DYING: case CPU_DOWN_FAILED: case CPU_UP_CANCELED: console_lock(); console_unlock(); } return NOTIFY_OK; } /** * console_lock - lock the console system for exclusive use. * * Acquires a lock which guarantees that the caller has * exclusive access to the console system and the console_drivers list. * * Can sleep, returns nothing. */ void console_lock(void) { BUG_ON(in_interrupt()); down(&console_sem); if (console_suspended) return; console_locked = 1; console_may_schedule = 1; } EXPORT_SYMBOL(console_lock); /** * console_trylock - try to lock the console system for exclusive use. * * Tried to acquire a lock which guarantees that the caller has * exclusive access to the console system and the console_drivers list. * * returns 1 on success, and 0 on failure to acquire the lock. 
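 * (A sketch of the intended non-blocking pattern, mirroring the lock/unlock
 * pairing console_cpu_notify() uses above:
 *
 *	if (console_trylock()) {
 *		... touch console state ...
 *		console_unlock();
 *	}
 *
 * Contexts that must not sleep use this instead of console_lock(), which
 * can block in down().)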
*/ int console_trylock(void) { if (down_trylock(&console_sem)) return 0; if (console_suspended) { up(&console_sem); return 0; } console_locked = 1; console_may_schedule = 0; return 1; } EXPORT_SYMBOL(console_trylock); int is_console_locked(void) { return console_locked; } /* * Delayed printk facility, for scheduler-internal messages: */ #define PRINTK_BUF_SIZE 512 #define PRINTK_PENDING_WAKEUP 0x01 #define PRINTK_PENDING_SCHED 0x02 static DEFINE_PER_CPU(int, printk_pending); static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); void printk_tick(void) { if (__this_cpu_read(printk_pending)) { int pending = __this_cpu_xchg(printk_pending, 0); if (pending & PRINTK_PENDING_SCHED) { char *buf = __get_cpu_var(printk_sched_buf); printk(KERN_WARNING "[sched_delayed] %s", buf); } if (pending & PRINTK_PENDING_WAKEUP) wake_up_interruptible(&log_wait); } } int printk_needs_cpu(int cpu) { if (cpu_is_offline(cpu)) printk_tick(); return __this_cpu_read(printk_pending); } void wake_up_klogd(void) { if (waitqueue_active(&log_wait)) this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); } /** * console_unlock - unlock the console system * * Releases the console_lock which the caller holds on the console system * and the console driver list. * * While the console_lock was held, console output may have been buffered * by printk(). If this is the case, console_unlock(); emits * the output prior to releasing the lock. * * If there is output waiting for klogd, we wake it up. * * console_unlock(); may be called from any context. */ void console_unlock(void) { unsigned long flags; unsigned _con_start, _log_end; unsigned wake_klogd = 0, retry = 0; if (console_suspended) { up(&console_sem); return; } console_may_schedule = 0; again: for ( ; ; ) { raw_spin_lock_irqsave(&logbuf_lock, flags); wake_klogd |= log_start - log_end; if (con_start == log_end) break; /* Nothing to print */ _con_start = con_start; _log_end = log_end; con_start = log_end; /* Flush */ raw_spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ call_console_drivers(_con_start, _log_end); start_critical_timings(); local_irq_restore(flags); } console_locked = 0; /* Release the exclusive_console once it is used */ if (unlikely(exclusive_console)) exclusive_console = NULL; raw_spin_unlock(&logbuf_lock); up(&console_sem); /* * Someone could have filled up the buffer again, so re-check if there's * something to flush. In case we cannot trylock the console_sem again, * there's a new owner and the console_unlock() from them will do the * flush, no worries. */ raw_spin_lock(&logbuf_lock); if (con_start != log_end) retry = 1; raw_spin_unlock_irqrestore(&logbuf_lock, flags); if (retry && console_trylock()) goto again; if (wake_klogd) wake_up_klogd(); } EXPORT_SYMBOL(console_unlock); /** * console_conditional_schedule - yield the CPU if required * * If the console code is currently allowed to sleep, and * if this CPU should yield the CPU to another task, do * so here. * * Must be called within console_lock();. */ void __sched console_conditional_schedule(void) { if (console_may_schedule) cond_resched(); } EXPORT_SYMBOL(console_conditional_schedule); void console_unblank(void) { struct console *c; /* * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1.. 
*/ if (oops_in_progress) { if (down_trylock(&console_sem) != 0) return; } else console_lock(); console_locked = 1; console_may_schedule = 0; for_each_console(c) if ((c->flags & CON_ENABLED) && c->unblank) c->unblank(); console_unlock(); } /* * Return the console tty driver structure and its associated index */ struct tty_driver *console_device(int *index) { struct console *c; struct tty_driver *driver = NULL; console_lock(); for_each_console(c) { if (!c->device) continue; driver = c->device(c, index); if (driver) break; } console_unlock(); return driver; } /* * Prevent further output on the passed console device so that (for example) * serial drivers can disable console output before suspending a port, and can * re-enable output afterwards. */ void console_stop(struct console *console) { console_lock(); console->flags &= ~CON_ENABLED; console_unlock(); } EXPORT_SYMBOL(console_stop); void console_start(struct console *console) { console_lock(); console->flags |= CON_ENABLED; console_unlock(); } EXPORT_SYMBOL(console_start); static int __read_mostly keep_bootcon; static int __init keep_bootcon_setup(char *str) { keep_bootcon = 1; printk(KERN_INFO "debug: skip boot console de-registration.\n"); return 0; } early_param("keep_bootcon", keep_bootcon_setup); /* * The console driver calls this routine during kernel initialization * to register the console printing procedure with printk() and to * print any messages that were printed by the kernel before the * console driver was initialized. * * This can happen pretty early during the boot process (because of * early_printk) - sometimes before setup_arch() completes - be careful * of what kernel features are used - they may not be initialised yet. * * There are two types of consoles - bootconsoles (early_printk) and * "real" consoles (everything which is not a bootconsole) which are * handled differently. * - Any number of bootconsoles can be registered at any time. * - As soon as a "real" console is registered, all bootconsoles * will be unregistered automatically. * - Once a "real" console is registered, any attempt to register a * bootconsoles will be rejected */ void register_console(struct console *newcon) { int i; unsigned long flags; struct console *bcon = NULL; /* * before we register a new CON_BOOT console, make sure we don't * already have a valid console */ if (console_drivers && newcon->flags & CON_BOOT) { /* find the last or real console */ for_each_console(bcon) { if (!(bcon->flags & CON_BOOT)) { printk(KERN_INFO "Too late to register bootconsole %s%d\n", newcon->name, newcon->index); return; } } } if (console_drivers && console_drivers->flags & CON_BOOT) bcon = console_drivers; if (preferred_console < 0 || bcon || !console_drivers) preferred_console = selected_console; if (newcon->early_setup) newcon->early_setup(); /* * See if we want to use this console driver. If we * didn't select a console we take the first one * that registers here. */ if (preferred_console < 0) { if (newcon->index < 0) newcon->index = 0; if (newcon->setup == NULL || newcon->setup(newcon, NULL) == 0) { newcon->flags |= CON_ENABLED; if (newcon->device) { newcon->flags |= CON_CONSDEV; preferred_console = 0; } } } /* * See if this console matches one we selected on * the command line. 
*/ for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++) { if (strcmp(console_cmdline[i].name, newcon->name) != 0) continue; if (newcon->index >= 0 && newcon->index != console_cmdline[i].index) continue; if (newcon->index < 0) newcon->index = console_cmdline[i].index; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (console_cmdline[i].brl_options) { newcon->flags |= CON_BRL; braille_register_console(newcon, console_cmdline[i].index, console_cmdline[i].options, console_cmdline[i].brl_options); return; } #endif if (newcon->setup && newcon->setup(newcon, console_cmdline[i].options) != 0) break; newcon->flags |= CON_ENABLED; newcon->index = console_cmdline[i].index; if (i == selected_console) { newcon->flags |= CON_CONSDEV; preferred_console = selected_console; } break; } if (!(newcon->flags & CON_ENABLED)) return; /* * If we have a bootconsole, and are switching to a real console, * don't print everything out again, since when the boot console, and * the real console are the same physical device, it's annoying to * see the beginning boot messages twice */ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) newcon->flags &= ~CON_PRINTBUFFER; /* * Put this console in the list - keep the * preferred driver at the head of the list. */ console_lock(); if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) { newcon->next = console_drivers; console_drivers = newcon; if (newcon->next) newcon->next->flags &= ~CON_CONSDEV; } else { newcon->next = console_drivers->next; console_drivers->next = newcon; } if (newcon->flags & CON_PRINTBUFFER) { /* * console_unlock(); will print out the buffered messages * for us. */ raw_spin_lock_irqsave(&logbuf_lock, flags); con_start = log_start; raw_spin_unlock_irqrestore(&logbuf_lock, flags); /* * We're about to replay the log buffer. Only do this to the * just-registered console to avoid excessive message spam to * the already-registered consoles. */ exclusive_console = newcon; } console_unlock(); console_sysfs_notify(); /* * By unregistering the bootconsoles after we enable the real console * we get the "console xxx enabled" message on all the consoles - * boot consoles, real consoles, etc - this is to ensure that end * users know there might be something in the kernel's log buffer that * went to the bootconsole (that they do not see on the real console) */ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && !keep_bootcon) { /* we need to iterate through twice, to make sure we print * everything out, before we unregister the console(s) */ printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n", newcon->name, newcon->index); for_each_console(bcon) if (bcon->flags & CON_BOOT) unregister_console(bcon); } else { printk(KERN_INFO "%sconsole [%s%d] enabled\n", (newcon->flags & CON_BOOT) ? "boot" : "" , newcon->name, newcon->index); } } EXPORT_SYMBOL(register_console); int unregister_console(struct console *console) { struct console *a, *b; int res = 1; #ifdef CONFIG_A11Y_BRAILLE_CONSOLE if (console->flags & CON_BRL) return braille_unregister_console(console); #endif console_lock(); if (console_drivers == console) { console_drivers=console->next; res = 0; } else if (console_drivers) { for (a=console_drivers->next, b=console_drivers ; a; b=a, a=b->next) { if (a == console) { b->next = a->next; res = 0; break; } } } /* * If this isn't the last console and it has CON_CONSDEV set, we * need to set it on the next preferred console. 
*/ if (console_drivers != NULL && console->flags & CON_CONSDEV) console_drivers->flags |= CON_CONSDEV; console_unlock(); console_sysfs_notify(); return res; } EXPORT_SYMBOL(unregister_console); static int __init printk_late_init(void) { struct console *con; for_each_console(con) { if (!keep_bootcon && con->flags & CON_BOOT) { printk(KERN_INFO "turn off boot console %s%d\n", con->name, con->index); unregister_console(con); } } hotcpu_notifier(console_cpu_notify, 0); return 0; } late_initcall(printk_late_init); #if defined CONFIG_PRINTK int printk_sched(const char *fmt, ...) { unsigned long flags; va_list args; char *buf; int r; local_irq_save(flags); buf = __get_cpu_var(printk_sched_buf); va_start(args, fmt); r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args); va_end(args); __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED); local_irq_restore(flags); return r; } /* * printk rate limiting, lifted from the networking subsystem. * * This enforces a rate limit: not more than 10 kernel messages * every 5s to make a denial-of-service attack impossible. */ DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); int __printk_ratelimit(const char *func) { return ___ratelimit(&printk_ratelimit_state, func); } EXPORT_SYMBOL(__printk_ratelimit); /** * printk_timed_ratelimit - caller-controlled printk ratelimiting * @caller_jiffies: pointer to caller's state * @interval_msecs: minimum interval between prints * * printk_timed_ratelimit() returns true if more than @interval_msecs * milliseconds have elapsed since the last time printk_timed_ratelimit() * returned true. */ bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msecs) { if (*caller_jiffies == 0 || !time_in_range(jiffies, *caller_jiffies, *caller_jiffies + msecs_to_jiffies(interval_msecs))) { *caller_jiffies = jiffies; return true; } return false; } EXPORT_SYMBOL(printk_timed_ratelimit); static DEFINE_SPINLOCK(dump_list_lock); static LIST_HEAD(dump_list); /** * kmsg_dump_register - register a kernel log dumper. * @dumper: pointer to the kmsg_dumper structure * * Adds a kernel log dumper to the system. The dump callback in the * structure will be called when the kernel oopses or panics and must be * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. */ int kmsg_dump_register(struct kmsg_dumper *dumper) { unsigned long flags; int err = -EBUSY; /* The dump callback needs to be set */ if (!dumper->dump) return -EINVAL; spin_lock_irqsave(&dump_list_lock, flags); /* Don't allow registering multiple times */ if (!dumper->registered) { dumper->registered = 1; list_add_tail_rcu(&dumper->list, &dump_list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); return err; } EXPORT_SYMBOL_GPL(kmsg_dump_register); /** * kmsg_dump_unregister - unregister a kmsg dumper. * @dumper: pointer to the kmsg_dumper structure * * Removes a dump device from the system. Returns zero on success and * %-EINVAL otherwise. */ int kmsg_dump_unregister(struct kmsg_dumper *dumper) { unsigned long flags; int err = -EINVAL; spin_lock_irqsave(&dump_list_lock, flags); if (dumper->registered) { dumper->registered = 0; list_del_rcu(&dumper->list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); synchronize_rcu(); return err; } EXPORT_SYMBOL_GPL(kmsg_dump_unregister); /** * kmsg_dump - dump kernel log to kernel message dumpers. * @reason: the reason (oops, panic etc) for dumping * * Iterate through each of the dump devices and call the oops/panic * callbacks with the log buffer. 
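 * (A sketch, all names hypothetical: the callback receives the log as up to
 * two segments because the ring buffer may wrap; s1/l1 always precede s2/l2
 * chronologically, and s1 is empty when the data does not wrap:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			enum kmsg_dump_reason reason,
 *			const char *s1, unsigned long l1,
 *			const char *s2, unsigned long l2)
 *	{
 *		... persist s1[0..l1) and then s2[0..l2) somewhere safe ...
 *	}
 *	static struct kmsg_dumper my_dumper = { .dump = my_dump };
 *
 * followed by kmsg_dump_register(&my_dumper) at init time.)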
*/ void kmsg_dump(enum kmsg_dump_reason reason) { unsigned long end; unsigned chars; struct kmsg_dumper *dumper; const char *s1, *s2; unsigned long l1, l2; unsigned long flags; if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) return; /* Theoretically, the log could move on after we do this, but there's not a lot we can do about that. The new messages will overwrite the start of what we dump. */ raw_spin_lock_irqsave(&logbuf_lock, flags); end = log_end & LOG_BUF_MASK; chars = logged_chars; raw_spin_unlock_irqrestore(&logbuf_lock, flags); if (chars > end) { s1 = log_buf + log_buf_len - chars + end; l1 = chars - end; s2 = log_buf; l2 = end; } else { s1 = ""; l1 = 0; s2 = log_buf + end - chars; l2 = chars; } rcu_read_lock(); list_for_each_entry_rcu(dumper, &dump_list, list) dumper->dump(dumper, reason, s1, l1, s2, l2); rcu_read_unlock(); } #endif
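/*
 * Editor's note: a minimal sketch, not part of the original kernel file,
 * showing how a client module might use the kmsg_dump_register() API
 * implemented above. The callback signature mirrors the
 * dumper->dump(dumper, reason, s1, l1, s2, l2) call in kmsg_dump(): the log
 * is handed over as two segments because it lives in a circular buffer and
 * may wrap. "example_dumper" and "example_dump" are hypothetical names chosen
 * for illustration, and this assumes a kernel of the same era as the code
 * above (the two-segment dump callback).
 */
#include <linux/kmsg_dump.h>
#include <linux/module.h>

static void example_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* s1/l1 is the older (wrapped-around) part of the log, s2/l2 the
	 * newer part; either segment may be empty. A real dumper would
	 * persist the bytes (e.g. to flash); here we only report sizes. */
	pr_info("example_dump: reason=%d, %lu+%lu bytes\n", (int)reason, l1, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_dumper_init(void)
{
	/* Returns -EINVAL if .dump is unset, -EBUSY if already registered. */
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_dumper_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_dumper_init);
module_exit(example_dumper_exit);
MODULE_LICENSE("GPL");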
./CrossVul/dataset_final_sorted/CWE-119/c/good_5589_1
crossvul-cpp_data_good_129_0
#include <math.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <assert.h> #include "lua.h" #include "lauxlib.h" #define LUACMSGPACK_NAME "cmsgpack" #define LUACMSGPACK_SAFE_NAME "cmsgpack_safe" #define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0" #define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo" #define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua" /* Allows a preprocessor directive to override MAX_NESTING */ #ifndef LUACMSGPACK_MAX_NESTING #define LUACMSGPACK_MAX_NESTING 16 /* Max tables nesting. */ #endif /* Check if float or double can be an integer without loss of precision */ #define IS_INT_TYPE_EQUIVALENT(x, T) (!isinf(x) && (T)(x) == (x)) #define IS_INT64_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int64_t) #define IS_INT_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int) /* If size of pointer is equal to a 4 byte integer, we're on 32 bits. */ #if UINTPTR_MAX == UINT_MAX #define BITS_32 1 #else #define BITS_32 0 #endif #if BITS_32 #define lua_pushunsigned(L, n) lua_pushnumber(L, n) #else #define lua_pushunsigned(L, n) lua_pushinteger(L, n) #endif /* ============================================================================= * MessagePack implementation and bindings for Lua 5.1/5.2. * Copyright(C) 2012 Salvatore Sanfilippo <antirez@gmail.com> * * http://github.com/antirez/lua-cmsgpack * * For MessagePack specification check the following web site: * http://wiki.msgpack.org/display/MSGPACK/Format+specification * * See Copyright Notice at the end of this file. * * CHANGELOG: * 19-Feb-2012 (ver 0.1.0): Initial release. * 20-Feb-2012 (ver 0.2.0): Tables encoding improved. * 20-Feb-2012 (ver 0.2.1): Minor bug fixing. * 20-Feb-2012 (ver 0.3.0): Module renamed lua-cmsgpack (was lua-msgpack). * 04-Apr-2014 (ver 0.3.1): Lua 5.2 support and minor bug fix. * 07-Apr-2014 (ver 0.4.0): Multiple pack/unpack, lua allocator, efficiency. * ========================================================================== */ /* -------------------------- Endian conversion -------------------------------- * We use it only for floats and doubles; all the other conversions are performed * in an endian-independent fashion. So the only thing we need is a function * that swaps a binary string if the arch is little endian (and leaves it untouched * otherwise). */ /* Reverse memory bytes if arch is little endian. Given the conceptual * simplicity of the Lua build system we prefer to check for endianness at runtime. * The performance difference should be acceptable. */ void memrevifle(void *ptr, size_t len) { unsigned char *p = (unsigned char *)ptr, *e = (unsigned char *)p+len-1, aux; int test = 1; unsigned char *testp = (unsigned char*) &test; if (testp[0] == 0) return; /* Big endian, nothing to do. */ len /= 2; while(len--) { aux = *p; *p = *e; *e = aux; p++; e--; } } /* ---------------------------- String buffer ---------------------------------- * This is a simple implementation of string buffers. The only operations * supported are creating empty buffers and appending bytes to them. * The string buffer uses 2x preallocation on every realloc for O(N) append * behavior.
*/ typedef struct mp_buf { unsigned char *b; size_t len, free; } mp_buf; void *mp_realloc(lua_State *L, void *target, size_t osize, size_t nsize) { void *(*local_realloc) (void *, void *, size_t osize, size_t nsize) = NULL; void *ud; local_realloc = lua_getallocf(L, &ud); return local_realloc(ud, target, osize, nsize); } mp_buf *mp_buf_new(lua_State *L) { mp_buf *buf = NULL; /* Old size = 0; new size = sizeof(*buf) */ buf = (mp_buf*)mp_realloc(L, NULL, 0, sizeof(*buf)); buf->b = NULL; buf->len = buf->free = 0; return buf; } void mp_buf_append(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { if (buf->free < len) { size_t newsize = (buf->len+len)*2; buf->b = (unsigned char*)mp_realloc(L, buf->b, buf->len + buf->free, newsize); buf->free = newsize - buf->len; } memcpy(buf->b+buf->len,s,len); buf->len += len; buf->free -= len; } void mp_buf_free(lua_State *L, mp_buf *buf) { mp_realloc(L, buf->b, buf->len + buf->free, 0); /* realloc to 0 = free */ mp_realloc(L, buf, sizeof(*buf), 0); } /* ---------------------------- String cursor ---------------------------------- * This simple data structure is used for parsing. Basically you create a cursor * using a string pointer and a length, then it is possible to access the * current string position with cursor->p, check the remaining length * in cursor->left, and finally consume more string using * mp_cur_consume(cursor,len), to advance 'p' and subtract 'left'. * An additional field cursor->err is set to zero on initialization and can * be used to report errors. */ #define MP_CUR_ERROR_NONE 0 #define MP_CUR_ERROR_EOF 1 /* Not enough data to complete operation. */ #define MP_CUR_ERROR_BADFMT 2 /* Bad data format */ typedef struct mp_cur { const unsigned char *p; size_t left; int err; } mp_cur; void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { cursor->p = s; cursor->left = len; cursor->err = MP_CUR_ERROR_NONE; } #define mp_cur_consume(_c,_len) do { _c->p += _len; _c->left -= _len; } while(0) /* When there is not enough room we set an error in the cursor and return. This * is very common across the code so we have a macro to make the code look * a bit simpler. */ #define mp_cur_need(_c,_len) do { \ if (_c->left < _len) { \ _c->err = MP_CUR_ERROR_EOF; \ return; \ } \ } while(0) /* ------------------------- Low level MP encoding -------------------------- */ void mp_encode_bytes(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { unsigned char hdr[5]; int hdrlen; if (len < 32) { hdr[0] = 0xa0 | (len&0xff); /* fix raw */ hdrlen = 1; } else if (len <= 0xff) { hdr[0] = 0xd9; hdr[1] = len; hdrlen = 2; } else if (len <= 0xffff) { hdr[0] = 0xda; hdr[1] = (len&0xff00)>>8; hdr[2] = len&0xff; hdrlen = 3; } else { hdr[0] = 0xdb; hdr[1] = (len&0xff000000)>>24; hdr[2] = (len&0xff0000)>>16; hdr[3] = (len&0xff00)>>8; hdr[4] = len&0xff; hdrlen = 5; } mp_buf_append(L,buf,hdr,hdrlen); mp_buf_append(L,buf,s,len); } /* we assume IEEE 754 internal format for single and double precision floats.
*/ void mp_encode_double(lua_State *L, mp_buf *buf, double d) { unsigned char b[9]; float f = d; assert(sizeof(f) == 4 && sizeof(d) == 8); if (d == (double)f) { b[0] = 0xca; /* float IEEE 754 */ memcpy(b+1,&f,4); memrevifle(b+1,4); mp_buf_append(L,buf,b,5); } else if (sizeof(d) == 8) { b[0] = 0xcb; /* double IEEE 754 */ memcpy(b+1,&d,8); memrevifle(b+1,8); mp_buf_append(L,buf,b,9); } } void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[9]; int enclen; if (n >= 0) { if (n <= 127) { b[0] = n & 0x7f; /* positive fixnum */ enclen = 1; } else if (n <= 0xff) { b[0] = 0xcc; /* uint 8 */ b[1] = n & 0xff; enclen = 2; } else if (n <= 0xffff) { b[0] = 0xcd; /* uint 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else if (n <= 0xffffffffLL) { b[0] = 0xce; /* uint 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } else { b[0] = 0xcf; /* uint 64 */ b[1] = (n & 0xff00000000000000LL) >> 56; b[2] = (n & 0xff000000000000LL) >> 48; b[3] = (n & 0xff0000000000LL) >> 40; b[4] = (n & 0xff00000000LL) >> 32; b[5] = (n & 0xff000000) >> 24; b[6] = (n & 0xff0000) >> 16; b[7] = (n & 0xff00) >> 8; b[8] = n & 0xff; enclen = 9; } } else { if (n >= -32) { b[0] = ((signed char)n); /* negative fixnum */ enclen = 1; } else if (n >= -128) { b[0] = 0xd0; /* int 8 */ b[1] = n & 0xff; enclen = 2; } else if (n >= -32768) { b[0] = 0xd1; /* int 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else if (n >= -2147483648LL) { b[0] = 0xd2; /* int 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } else { b[0] = 0xd3; /* int 64 */ b[1] = (n & 0xff00000000000000LL) >> 56; b[2] = (n & 0xff000000000000LL) >> 48; b[3] = (n & 0xff0000000000LL) >> 40; b[4] = (n & 0xff00000000LL) >> 32; b[5] = (n & 0xff000000) >> 24; b[6] = (n & 0xff0000) >> 16; b[7] = (n & 0xff00) >> 8; b[8] = n & 0xff; enclen = 9; } } mp_buf_append(L,buf,b,enclen); } void mp_encode_array(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; if (n <= 15) { b[0] = 0x90 | (n & 0xf); /* fix array */ enclen = 1; } else if (n <= 65535) { b[0] = 0xdc; /* array 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else { b[0] = 0xdd; /* array 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } mp_buf_append(L,buf,b,enclen); } void mp_encode_map(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; if (n <= 15) { b[0] = 0x80 | (n & 0xf); /* fix map */ enclen = 1; } else if (n <= 65535) { b[0] = 0xde; /* map 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else { b[0] = 0xdf; /* map 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } mp_buf_append(L,buf,b,enclen); } /* --------------------------- Lua types encoding --------------------------- */ void mp_encode_lua_string(lua_State *L, mp_buf *buf) { size_t len; const char *s; s = lua_tolstring(L,-1,&len); mp_encode_bytes(L,buf,(const unsigned char*)s,len); } void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { unsigned char b = lua_toboolean(L,-1) ? 
0xc3 : 0xc2; mp_buf_append(L,buf,&b,1); } /* Lua 5.3 has a built in 64-bit integer type */ void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { #if (LUA_VERSION_NUM < 503) && BITS_32 lua_Number i = lua_tonumber(L,-1); #else lua_Integer i = lua_tointeger(L,-1); #endif mp_encode_int(L, buf, (int64_t)i); } /* Lua 5.2 and lower only have 64-bit doubles, so we need to * detect if the double may be representable as an int * for Lua < 5.3 */ void mp_encode_lua_number(lua_State *L, mp_buf *buf) { lua_Number n = lua_tonumber(L,-1); if (IS_INT64_EQUIVALENT(n)) { mp_encode_lua_integer(L, buf); } else { mp_encode_double(L,buf,(double)n); } } void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); /* Convert a lua table into a message pack list. */ void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { #if LUA_VERSION_NUM < 502 size_t len = lua_objlen(L,-1), j; #else size_t len = lua_rawlen(L,-1), j; #endif mp_encode_array(L,buf,len); luaL_checkstack(L, 1, "in function mp_encode_lua_table_as_array"); for (j = 1; j <= len; j++) { lua_pushnumber(L,j); lua_gettable(L,-2); mp_encode_lua_type(L,buf,level+1); } } /* Convert a lua table into a message pack key-value map. */ void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { size_t len = 0; /* First step: count keys in the table. No other way to do it with the * Lua API: we need to iterate a first time. Note that an alternative * would be to do a single run, and then hack the buffer to insert the * map opcodes for message pack. Too hackish for this lib. */ luaL_checkstack(L, 3, "in function mp_encode_lua_table_as_map"); lua_pushnil(L); while(lua_next(L,-2)) { lua_pop(L,1); /* remove value, keep key for next iteration. */ len++; } /* Step two: the actual encoding of the map. */ mp_encode_map(L,buf,len); lua_pushnil(L); while(lua_next(L,-2)) { /* Stack: ... key value */ lua_pushvalue(L,-2); /* Stack: ... key value key */ mp_encode_lua_type(L,buf,level+1); /* encode key */ mp_encode_lua_type(L,buf,level+1); /* encode val */ } } /* Returns true if the Lua table on top of the stack is exclusively composed * of numerical keys from 1 up to N, with N being the total number * of elements, without any hole in the middle. */ int table_is_an_array(lua_State *L) { int count = 0, max = 0; #if LUA_VERSION_NUM < 503 lua_Number n; #else lua_Integer n; #endif /* Stack top on function entry */ int stacktop; stacktop = lua_gettop(L); lua_pushnil(L); while(lua_next(L,-2)) { /* Stack: ... key value */ lua_pop(L,1); /* Stack: ... key */ /* The <= 0 check is valid here because we're comparing indexes. */ #if LUA_VERSION_NUM < 503 if ((LUA_TNUMBER != lua_type(L,-1)) || (n = lua_tonumber(L, -1)) <= 0 || !IS_INT_EQUIVALENT(n)) #else if (!lua_isinteger(L,-1) || (n = lua_tointeger(L, -1)) <= 0) #endif { lua_settop(L, stacktop); return 0; } max = (n > max ? n : max); count++; } /* We have the total number of elements in "count". Also we have * the max index encountered in "max". We can't reach this code * if there are indexes <= 0. If you also note that there can not be * repeated keys in a table, then if max==count you are sure * that all the keys from 1 to count (both included) are present. */ lua_settop(L, stacktop); return max == count; } /* If the length operator returns non-zero, that is, there is at least * an object at key '1', we serialize to message pack list. Otherwise * we use a map.
*/ void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { if (table_is_an_array(L)) mp_encode_lua_table_as_array(L,buf,level); else mp_encode_lua_table_as_map(L,buf,level); } void mp_encode_lua_null(lua_State *L, mp_buf *buf) { unsigned char b[1]; b[0] = 0xc0; mp_buf_append(L,buf,b,1); } void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { int t = lua_type(L,-1); /* Limit the encoding of nested tables to a specified maximum depth, so that * we survive when called against circular references in tables. */ if (t == LUA_TTABLE && level == LUACMSGPACK_MAX_NESTING) t = LUA_TNIL; switch(t) { case LUA_TSTRING: mp_encode_lua_string(L,buf); break; case LUA_TBOOLEAN: mp_encode_lua_bool(L,buf); break; case LUA_TNUMBER: #if LUA_VERSION_NUM < 503 mp_encode_lua_number(L,buf); break; #else if (lua_isinteger(L, -1)) { mp_encode_lua_integer(L, buf); } else { mp_encode_lua_number(L, buf); } break; #endif case LUA_TTABLE: mp_encode_lua_table(L,buf,level); break; default: mp_encode_lua_null(L,buf); break; } lua_pop(L,1); } /* * Packs all arguments as a stream for multiple unpacking later. * Returns an error if no arguments are provided. */ int mp_pack(lua_State *L) { int nargs = lua_gettop(L); int i; mp_buf *buf; if (nargs == 0) return luaL_argerror(L, 0, "MessagePack pack needs input."); if (!lua_checkstack(L, nargs)) return luaL_argerror(L, 0, "Too many arguments for MessagePack pack."); buf = mp_buf_new(L); for(i = 1; i <= nargs; i++) { /* Copy argument i to top of stack for _encode processing; * the encode function pops it from the stack when complete. */ luaL_checkstack(L, 1, "in function mp_pack"); lua_pushvalue(L, i); mp_encode_lua_type(L,buf,0); lua_pushlstring(L,(char*)buf->b,buf->len); /* Reuse the buffer for the next operation by * setting its free count to the total buffer size * and the current position to zero. */ buf->free += buf->len; buf->len = 0; } mp_buf_free(L, buf); /* Concatenate all nargs buffers together */ lua_concat(L, nargs); return 1; } /* ------------------------------- Decoding --------------------------------- */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c); void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) { assert(len <= UINT_MAX); int index = 1; lua_newtable(L); luaL_checkstack(L, 1, "in function mp_decode_to_lua_array"); while(len--) { lua_pushnumber(L,index++); mp_decode_to_lua_type(L,c); if (c->err) return; lua_settable(L,-3); } } void mp_decode_to_lua_hash(lua_State *L, mp_cur *c, size_t len) { assert(len <= UINT_MAX); lua_newtable(L); while(len--) { mp_decode_to_lua_type(L,c); /* key */ if (c->err) return; mp_decode_to_lua_type(L,c); /* value */ if (c->err) return; lua_settable(L,-3); } } /* Decode a Message Pack raw object pointed to by the string cursor 'c' to * a Lua type, which is left as the only result on the stack. */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { mp_cur_need(c,1); /* If we return more than 18 elements, we must resize the stack to * fit all our return values.
But, there is no way to * determine how many objects a msgpack will unpack to up front, so * we request a +1 larger stack on each iteration (noop if stack is * big enough, and when stack does require resize it doubles in size) */ luaL_checkstack(L, 1, "too many return values at once; " "use unpack_one or unpack_limit instead."); switch(c->p[0]) { case 0xcc: /* uint 8 */ mp_cur_need(c,2); lua_pushunsigned(L,c->p[1]); mp_cur_consume(c,2); break; case 0xd0: /* int 8 */ mp_cur_need(c,2); lua_pushinteger(L,(signed char)c->p[1]); mp_cur_consume(c,2); break; case 0xcd: /* uint 16 */ mp_cur_need(c,3); lua_pushunsigned(L, (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xd1: /* int 16 */ mp_cur_need(c,3); lua_pushinteger(L,(int16_t) (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xce: /* uint 32 */ mp_cur_need(c,5); lua_pushunsigned(L, ((uint32_t)c->p[1] << 24) | ((uint32_t)c->p[2] << 16) | ((uint32_t)c->p[3] << 8) | (uint32_t)c->p[4]); mp_cur_consume(c,5); break; case 0xd2: /* int 32 */ mp_cur_need(c,5); lua_pushinteger(L, ((int32_t)c->p[1] << 24) | ((int32_t)c->p[2] << 16) | ((int32_t)c->p[3] << 8) | (int32_t)c->p[4]); mp_cur_consume(c,5); break; case 0xcf: /* uint 64 */ mp_cur_need(c,9); lua_pushunsigned(L, ((uint64_t)c->p[1] << 56) | ((uint64_t)c->p[2] << 48) | ((uint64_t)c->p[3] << 40) | ((uint64_t)c->p[4] << 32) | ((uint64_t)c->p[5] << 24) | ((uint64_t)c->p[6] << 16) | ((uint64_t)c->p[7] << 8) | (uint64_t)c->p[8]); mp_cur_consume(c,9); break; case 0xd3: /* int 64 */ mp_cur_need(c,9); #if LUA_VERSION_NUM < 503 lua_pushnumber(L, #else lua_pushinteger(L, #endif ((int64_t)c->p[1] << 56) | ((int64_t)c->p[2] << 48) | ((int64_t)c->p[3] << 40) | ((int64_t)c->p[4] << 32) | ((int64_t)c->p[5] << 24) | ((int64_t)c->p[6] << 16) | ((int64_t)c->p[7] << 8) | (int64_t)c->p[8]); mp_cur_consume(c,9); break; case 0xc0: /* nil */ lua_pushnil(L); mp_cur_consume(c,1); break; case 0xc3: /* true */ lua_pushboolean(L,1); mp_cur_consume(c,1); break; case 0xc2: /* false */ lua_pushboolean(L,0); mp_cur_consume(c,1); break; case 0xca: /* float */ mp_cur_need(c,5); assert(sizeof(float) == 4); { float f; memcpy(&f,c->p+1,4); memrevifle(&f,4); lua_pushnumber(L,f); mp_cur_consume(c,5); } break; case 0xcb: /* double */ mp_cur_need(c,9); assert(sizeof(double) == 8); { double d; memcpy(&d,c->p+1,8); memrevifle(&d,8); lua_pushnumber(L,d); mp_cur_consume(c,9); } break; case 0xd9: /* raw 8 */ mp_cur_need(c,2); { size_t l = c->p[1]; mp_cur_need(c,2+l); lua_pushlstring(L,(char*)c->p+2,l); mp_cur_consume(c,2+l); } break; case 0xda: /* raw 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_need(c,3+l); lua_pushlstring(L,(char*)c->p+3,l); mp_cur_consume(c,3+l); } break; case 0xdb: /* raw 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | ((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_cur_need(c,l); lua_pushlstring(L,(char*)c->p,l); mp_cur_consume(c,l); } break; case 0xdc: /* array 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_consume(c,3); mp_decode_to_lua_array(L,c,l); } break; case 0xdd: /* array 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | ((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_array(L,c,l); } break; case 0xde: /* map 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_consume(c,3); mp_decode_to_lua_hash(L,c,l); } break; case 0xdf: /* map 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | 
((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_hash(L,c,l); } break; default: /* types that can't be identified by first byte value. */ if ((c->p[0] & 0x80) == 0) { /* positive fixnum */ lua_pushunsigned(L,c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xe0) { /* negative fixnum */ lua_pushinteger(L,(signed char)c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xa0) { /* fix raw */ size_t l = c->p[0] & 0x1f; mp_cur_need(c,1+l); lua_pushlstring(L,(char*)c->p+1,l); mp_cur_consume(c,1+l); } else if ((c->p[0] & 0xf0) == 0x90) { /* fix array */ size_t l = c->p[0] & 0xf; mp_cur_consume(c,1); mp_decode_to_lua_array(L,c,l); } else if ((c->p[0] & 0xf0) == 0x80) { /* fix map */ size_t l = c->p[0] & 0xf; mp_cur_consume(c,1); mp_decode_to_lua_hash(L,c,l); } else { c->err = MP_CUR_ERROR_BADFMT; } } } int mp_unpack_full(lua_State *L, int limit, int offset) { size_t len; const char *s; mp_cur c; int cnt; /* Number of objects unpacked */ int decode_all = (!limit && !offset); s = luaL_checklstring(L,1,&len); /* if no match, exits */ if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */ return luaL_error(L, "Invalid request to unpack with offset of %d and limit of %d.", offset, limit); else if (offset > len) return luaL_error(L, "Start offset %d greater than input length %d.", offset, (int)len); if (decode_all) limit = INT_MAX; mp_cur_init(&c,(const unsigned char *)s+offset,len-offset); /* We loop over the decode because this could be a stream * of multiple top-level values serialized together */ for(cnt = 0; c.left > 0 && cnt < limit; cnt++) { mp_decode_to_lua_type(L,&c); if (c.err == MP_CUR_ERROR_EOF) { return luaL_error(L,"Missing bytes in input."); } else if (c.err == MP_CUR_ERROR_BADFMT) { return luaL_error(L,"Bad data format in input."); } } if (!decode_all) { /* c->left is the remaining size of the input buffer. * Subtract the unprocessed size from the entire buffer size * to get our next start offset. */ int offset = len - c.left; luaL_checkstack(L, 1, "in function mp_unpack_full"); /* Return offset -1 when we have processed the entire buffer. */ lua_pushinteger(L, c.left == 0 ? -1 : offset); /* Results are returned with the arg elements still * in place. Lua takes care of only returning * elements above the args for us. * In this case, we have one arg on the stack * for this function, so we insert our first return * value at position 2.
*/ lua_insert(L, 2); cnt += 1; /* increase return count by one to make room for offset */ } return cnt; } int mp_unpack(lua_State *L) { return mp_unpack_full(L, 0, 0); } int mp_unpack_one(lua_State *L) { int offset = luaL_optinteger(L, 2, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, 1, offset); } int mp_unpack_limit(lua_State *L) { int limit = luaL_checkinteger(L, 2); int offset = luaL_optinteger(L, 3, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, limit, offset); } int mp_safe(lua_State *L) { int argc, err, total_results; argc = lua_gettop(L); /* This adds our function to the bottom of the stack * (the "call this function" position) */ lua_pushvalue(L, lua_upvalueindex(1)); lua_insert(L, 1); err = lua_pcall(L, argc, LUA_MULTRET, 0); total_results = lua_gettop(L); if (!err) { return total_results; } else { lua_pushnil(L); lua_insert(L,-2); return 2; } } /* -------------------------------------------------------------------------- */ const struct luaL_Reg cmds[] = { {"pack", mp_pack}, {"unpack", mp_unpack}, {"unpack_one", mp_unpack_one}, {"unpack_limit", mp_unpack_limit}, {0} }; int luaopen_create(lua_State *L) { int i; /* Manually construct our module table instead of * relying on _register or _newlib */ lua_newtable(L); for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { lua_pushcfunction(L, cmds[i].func); lua_setfield(L, -2, cmds[i].name); } /* Add metadata */ lua_pushliteral(L, LUACMSGPACK_NAME); lua_setfield(L, -2, "_NAME"); lua_pushliteral(L, LUACMSGPACK_VERSION); lua_setfield(L, -2, "_VERSION"); lua_pushliteral(L, LUACMSGPACK_COPYRIGHT); lua_setfield(L, -2, "_COPYRIGHT"); lua_pushliteral(L, LUACMSGPACK_DESCRIPTION); lua_setfield(L, -2, "_DESCRIPTION"); return 1; } LUALIB_API int luaopen_cmsgpack(lua_State *L) { luaopen_create(L); #if LUA_VERSION_NUM < 502 /* Register name globally for 5.1 */ lua_pushvalue(L, -1); lua_setglobal(L, LUACMSGPACK_NAME); #endif return 1; } LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) { int i; luaopen_cmsgpack(L); /* Wrap all functions in the safe handler */ for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { lua_getfield(L, -1, cmds[i].name); lua_pushcclosure(L, mp_safe, 1); lua_setfield(L, -2, cmds[i].name); } #if LUA_VERSION_NUM < 502 /* Register name globally for 5.1 */ lua_pushvalue(L, -1); lua_setglobal(L, LUACMSGPACK_SAFE_NAME); #endif return 1; } /****************************************************************************** * Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
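/*
 * Editor's note: a minimal host-side sketch, not part of the original file,
 * showing how the module above can be loaded from C and used to round-trip a
 * value. It assumes this translation unit is compiled and linked in alongside
 * a Lua 5.1+ library; main() and the embedded test chunk are illustrative
 * only, not part of the lua-cmsgpack API.
 */
#include <stdio.h>
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"

int main(void) {
    lua_State *L = luaL_newstate();
    luaL_openlibs(L);

    /* luaopen_cmsgpack() leaves the module table on the stack; expose it as
     * the global "cmsgpack" so the chunk below can reach it. */
    luaopen_cmsgpack(L);
    lua_setglobal(L, "cmsgpack");

    /* pack() concatenates the encodings of all of its arguments into one
     * stream; unpack() decodes every top-level object in that stream. */
    if (luaL_dostring(L,
            "local blob = cmsgpack.pack({1, 2, 3}, 'hello')\n"
            "local t, s = cmsgpack.unpack(blob)\n"
            "assert(t[2] == 2 and s == 'hello')\n"
            "print('round-trip ok: ' .. #blob .. ' bytes')\n"))
        fprintf(stderr, "error: %s\n", lua_tostring(L, -1));

    lua_close(L);
    return 0;
}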
./CrossVul/dataset_final_sorted/CWE-119/c/good_129_0
crossvul-cpp_data_bad_244_0
/** * @file * Manage IMAP messages * * @authors * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net> * Copyright (C) 1999-2009 Brendan Cully <brendan@kublai.com> * * @copyright * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation, either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @page imap_message Manage IMAP messages * * Manage IMAP messages */ #include "config.h" #include <ctype.h> #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "imap_private.h" #include "mutt/mutt.h" #include "conn/conn.h" #include "mutt.h" #include "message.h" #include "bcache.h" #include "body.h" #include "context.h" #include "envelope.h" #include "globals.h" #include "header.h" #include "imap/imap.h" #include "mailbox.h" #include "mutt_account.h" #include "mutt_curses.h" #include "mutt_socket.h" #include "mx.h" #include "options.h" #include "progress.h" #include "protos.h" #include "tags.h" #ifdef USE_HCACHE #include "hcache/hcache.h" #endif struct BodyCache; /** * new_header_data - Create a new ImapHeaderData * @retval ptr New ImapHeaderData */ static struct ImapHeaderData *new_header_data(void) { struct ImapHeaderData *d = mutt_mem_calloc(1, sizeof(struct ImapHeaderData)); return d; } /** * update_context - Cache the headers of all the emails * @param idata Server data * @param oldmsgcount Number of emails */ static void update_context(struct ImapData *idata, int oldmsgcount) { struct Header *h = NULL; struct Context *ctx = idata->ctx; if (!idata->uid_hash) idata->uid_hash = mutt_hash_int_create(MAX(6 * ctx->msgcount / 5, 30), 0); for (int msgno = oldmsgcount; msgno < ctx->msgcount; msgno++) { h = ctx->hdrs[msgno]; mutt_hash_int_insert(idata->uid_hash, HEADER_DATA(h)->uid, h); } } /** * msg_cache_open - Open a message cache * @param idata Server data * @retval ptr Success, using existing cache * @retval ptr Success, opened new cache * @retval NULL Failure */ static struct BodyCache *msg_cache_open(struct ImapData *idata) { char mailbox[PATH_MAX]; if (idata->bcache) return idata->bcache; imap_cachepath(idata, idata->mailbox, mailbox, sizeof(mailbox)); return mutt_bcache_open(&idata->conn->account, mailbox); } /** * msg_cache_get - Get the message cache entry for an email * @param idata Server data * @param h Email header * @retval ptr Success, handle of cache entry * @retval NULL Failure */ static FILE *msg_cache_get(struct ImapData *idata, struct Header *h) { if (!idata || !h) return NULL; idata->bcache = msg_cache_open(idata); char id[64]; snprintf(id, sizeof(id), "%u-%u", idata->uid_validity, HEADER_DATA(h)->uid); return mutt_bcache_get(idata->bcache, id); } /** * msg_cache_put - Put an email into the message cache * @param idata Server data * @param h Email header * @retval ptr Success, handle of cache entry * @retval NULL Failure */ static FILE *msg_cache_put(struct ImapData *idata, struct Header *h) { if (!idata || !h) return NULL; idata->bcache = msg_cache_open(idata); char id[64]; snprintf(id, sizeof(id), "%u-%u", 
idata->uid_validity, HEADER_DATA(h)->uid); return mutt_bcache_put(idata->bcache, id); } /** * msg_cache_commit - Add to the message cache * @param idata Server data * @param h Email header * @retval 0 Success * @retval -1 Failure */ static int msg_cache_commit(struct ImapData *idata, struct Header *h) { if (!idata || !h) return -1; idata->bcache = msg_cache_open(idata); char id[64]; snprintf(id, sizeof(id), "%u-%u", idata->uid_validity, HEADER_DATA(h)->uid); return mutt_bcache_commit(idata->bcache, id); } /** * msg_cache_clean_cb - Delete an entry from the message cache * @param id ID of entry to delete * @param bcache BodyCache * @param data Server data * @retval 0 Always */ static int msg_cache_clean_cb(const char *id, struct BodyCache *bcache, void *data) { unsigned int uv, uid; struct ImapData *idata = data; if (sscanf(id, "%u-%u", &uv, &uid) != 2) return 0; /* bad UID */ if (uv != idata->uid_validity || !mutt_hash_int_find(idata->uid_hash, uid)) mutt_bcache_del(bcache, id); return 0; } /** * msg_parse_flags - read a FLAGS token into an ImapHeader * @param h Header to store flags * @param s Command string containing flags * @retval ptr The end of flags string * @retval NULL Failure */ static char *msg_parse_flags(struct ImapHeader *h, char *s) { struct ImapHeaderData *hd = h->data; /* sanity-check string */ if (mutt_str_strncasecmp("FLAGS", s, 5) != 0) { mutt_debug(1, "not a FLAGS response: %s\n", s); return NULL; } s += 5; SKIPWS(s); if (*s != '(') { mutt_debug(1, "bogus FLAGS response: %s\n", s); return NULL; } s++; FREE(&hd->flags_system); FREE(&hd->flags_remote); hd->deleted = hd->flagged = hd->replied = hd->read = hd->old = false; /* start parsing */ while (*s && *s != ')') { if (mutt_str_strncasecmp("\\deleted", s, 8) == 0) { s += 8; hd->deleted = true; } else if (mutt_str_strncasecmp("\\flagged", s, 8) == 0) { s += 8; hd->flagged = true; } else if (mutt_str_strncasecmp("\\answered", s, 9) == 0) { s += 9; hd->replied = true; } else if (mutt_str_strncasecmp("\\seen", s, 5) == 0) { s += 5; hd->read = true; } else if (mutt_str_strncasecmp("\\recent", s, 7) == 0) s += 7; else if (mutt_str_strncasecmp("old", s, 3) == 0) { s += 3; hd->old = MarkOld ? 
true : false; } else { char ctmp; char *flag_word = s; bool is_system_keyword = (mutt_str_strncasecmp("\\", s, 1) == 0); while (*s && !ISSPACE(*s) && *s != ')') s++; ctmp = *s; *s = '\0'; /* store other system flags as well (mainly \\Draft) */ if (is_system_keyword) mutt_str_append_item(&hd->flags_system, flag_word, ' '); /* store custom flags as well */ else mutt_str_append_item(&hd->flags_remote, flag_word, ' '); *s = ctmp; } SKIPWS(s); } /* wrap up, or note bad flags response */ if (*s == ')') s++; else { mutt_debug(1, "Unterminated FLAGS response: %s\n", s); return NULL; } return s; } /** * msg_parse_fetch - handle headers returned from header fetch * @param h IMAP Header * @param s Command string * @retval 0 Success * @retval -1 String is corrupted * @retval -2 Fetch contains a body or header lines that still need to be parsed */ static int msg_parse_fetch(struct ImapHeader *h, char *s) { char tmp[SHORT_STRING]; char *ptmp = NULL; if (!s) return -1; while (*s) { SKIPWS(s); if (mutt_str_strncasecmp("FLAGS", s, 5) == 0) { s = msg_parse_flags(h, s); if (!s) return -1; } else if (mutt_str_strncasecmp("UID", s, 3) == 0) { s += 3; SKIPWS(s); if (mutt_str_atoui(s, &h->data->uid) < 0) return -1; s = imap_next_word(s); } else if (mutt_str_strncasecmp("INTERNALDATE", s, 12) == 0) { s += 12; SKIPWS(s); if (*s != '\"') { mutt_debug(1, "bogus INTERNALDATE entry: %s\n", s); return -1; } s++; ptmp = tmp; /* bound the copy: tmp is a fixed-size stack buffer and this field is * server-controlled, so an overlong date must not overrun it */ while (*s && *s != '\"' && (ptmp != (tmp + sizeof(tmp) - 1))) *ptmp++ = *s++; if (*s != '\"') return -1; s++; /* skip past the trailing " */ *ptmp = '\0'; h->received = mutt_date_parse_imap(tmp); } else if (mutt_str_strncasecmp("RFC822.SIZE", s, 11) == 0) { s += 11; SKIPWS(s); ptmp = tmp; /* same bound here: an overlong size token must not overrun tmp */ while (isdigit((unsigned char) *s) && (ptmp != (tmp + sizeof(tmp) - 1))) *ptmp++ = *s++; *ptmp = '\0'; if (mutt_str_atol(tmp, &h->content_length) < 0) return -1; } else if ((mutt_str_strncasecmp("BODY", s, 4) == 0) || (mutt_str_strncasecmp("RFC822.HEADER", s, 13) == 0)) { /* handle above, in msg_fetch_header */ return -2; } else if (*s == ')') s++; /* end of request */ else if (*s) { /* got something I don't understand */ imap_error("msg_parse_fetch", s); return -1; } } return 0; } /** * msg_fetch_header - import IMAP FETCH response into an ImapHeader * @param ctx Context * @param h ImapHeader * @param buf Server string containing FETCH response * @param fp Connection to server * @retval 0 Success * @retval -1 String is not a fetch response * @retval -2 String is a corrupt fetch response * * Expects string beginning with * n FETCH. */ static int msg_fetch_header(struct Context *ctx, struct ImapHeader *h, char *buf, FILE *fp) { unsigned int bytes; int rc = -1; /* default now is that string isn't FETCH response */ int parse_rc; struct ImapData *idata = ctx->data; if (buf[0] != '*') return rc; /* skip to message number */ buf = imap_next_word(buf); if (mutt_str_atoui(buf, &h->data->msn) < 0) return rc; /* find FETCH tag */ buf = imap_next_word(buf); if (mutt_str_strncasecmp("FETCH", buf, 5) != 0) return rc; rc = -2; /* we've got a FETCH response, for better or worse */ buf = strchr(buf, '('); if (!buf) return rc; buf++; /* FIXME: current implementation - call msg_parse_fetch - if it returns -2, * read header lines and call it again. Silly. */ parse_rc = msg_parse_fetch(h, buf); if (!parse_rc) return 0; if (parse_rc != -2 || !fp) return rc; if (imap_get_literal_count(buf, &bytes) == 0) { imap_read_literal(fp, idata, bytes, NULL); /* we may have other fields of the FETCH _after_ the literal * (eg Domino puts FLAGS here). Nothing wrong with that, either.
* This all has to go - we should accept literals and nonliterals * interchangeably at any time. */ if (imap_cmd_step(idata) != IMAP_CMD_CONTINUE) return rc; if (msg_parse_fetch(h, idata->buf) == -1) return rc; } rc = 0; /* success */ /* subtract headers from message size - unfortunately only the subset of * headers we've requested. */ h->content_length -= bytes; return rc; } /** * flush_buffer - Write data to a connection * @param buf Buffer containing data * @param len Length of buffer * @param conn Network connection */ static void flush_buffer(char *buf, size_t *len, struct Connection *conn) { buf[*len] = '\0'; mutt_socket_write_n(conn, buf, *len); *len = 0; } /** * alloc_msn_index - Create lookup table of MSN to Header * @param idata Server data * @param msn_count Number of MSNs in use * * Mapping from Message Sequence Number to Header */ static void alloc_msn_index(struct ImapData *idata, size_t msn_count) { size_t new_size; if (msn_count <= idata->msn_index_size) return; /* This is a conservative check to protect against a malicious imap * server. Most likely size_t is bigger than an unsigned int, but * if msn_count is this big, we have a serious problem. */ if (msn_count >= (UINT_MAX / sizeof(struct Header *))) { mutt_error(_("Integer overflow -- can't allocate memory.")); mutt_exit(1); } /* Add a little padding, like mx_alloc_memory() */ new_size = msn_count + 25; if (!idata->msn_index) idata->msn_index = mutt_mem_calloc(new_size, sizeof(struct Header *)); else { mutt_mem_realloc(&idata->msn_index, sizeof(struct Header *) * new_size); memset(idata->msn_index + idata->msn_index_size, 0, sizeof(struct Header *) * (new_size - idata->msn_index_size)); } idata->msn_index_size = new_size; } /** * generate_seqset - Generate a sequence set * @param b Buffer for the result * @param idata Server data * @param msn_begin First Message Sequence number * @param msn_end Last Message Sequence number * * Generates a more complicated sequence set after using the header cache, * in case there are missing MSNs in the middle. * * There is a suggested limit of 1000 bytes for an IMAP client request. * Ideally, we would generate multiple requests if the number of ranges * is too big, but for now just fall back to using the whole range. */ static void generate_seqset(struct Buffer *b, struct ImapData *idata, unsigned int msn_begin, unsigned int msn_end) { int chunks = 0; int state = 0; /* 1: single msn, 2: range of msn */ unsigned int msn, range_begin, range_end; for (msn = msn_begin; msn <= msn_end + 1; msn++) { if (msn <= msn_end && !idata->msn_index[msn - 1]) { switch (state) { case 1: /* single: convert to a range */ state = 2; /* fallthrough */ case 2: /* extend range ending */ range_end = msn; break; default: state = 1; range_begin = msn; break; } } else if (state) { if (chunks++) mutt_buffer_addch(b, ','); if (chunks == 150) break; if (state == 1) mutt_buffer_printf(b, "%u", range_begin); else if (state == 2) mutt_buffer_printf(b, "%u:%u", range_begin, range_end); state = 0; } } /* Too big. Just query the whole range then. */ if (chunks == 150 || mutt_str_strlen(b->data) > 500) { b->dptr = b->data; mutt_buffer_printf(b, "%u:%u", msn_begin, msn_end); } } /** * set_changed_flag - Have the flags of an email changed * @param[in] ctx Context * @param[in] h Email Header * @param[in] local_changes Has the local mailbox been changed? * @param[out] server_changes Set to 1 if the flag has changed * @param[in] flag_name Flag to check, e.g.
#MUTT_FLAG * @param[in] old_hd_flag Old header flags * @param[in] new_hd_flag New header flags * @param[in] h_flag Email's value for flag_name * * Sets server_changes to 1 if a change to a flag is made, or in the * case of local_changes, if a change to a flag _would_ have been * made. */ static void set_changed_flag(struct Context *ctx, struct Header *h, int local_changes, int *server_changes, int flag_name, int old_hd_flag, int new_hd_flag, int h_flag) { /* If there are local_changes, we only want to note if the server * flags have changed, so we can set a reopen flag in * cmd_parse_fetch(). We don't want to count a local modification * to the header flag as a "change". */ if ((old_hd_flag == new_hd_flag) && local_changes) return; if (new_hd_flag == h_flag) return; if (server_changes) *server_changes = 1; /* Local changes have priority */ if (!local_changes) mutt_set_flag(ctx, h, flag_name, new_hd_flag); } /** * imap_read_headers - Read headers from the server * @param idata Server data * @param msn_begin First Message Sequence Number * @param msn_end Last Message Sequence Number * @retval num Last MSN * @retval -1 Failure * * Changed to read many headers instead of just one. It will return the msn of * the last message read. It will return a value other than msn_end if mail * comes in while downloading headers (in theory). */ int imap_read_headers(struct ImapData *idata, unsigned int msn_begin, unsigned int msn_end) { char *hdrreq = NULL; FILE *fp = NULL; char tempfile[PATH_MAX]; int msgno, idx; struct ImapHeader h; struct ImapStatus *status = NULL; int rc, mfhrc = 0, oldmsgcount; int fetch_msn_end = 0; unsigned int maxuid = 0; static const char *const want_headers = "DATE FROM SUBJECT TO CC MESSAGE-ID REFERENCES CONTENT-TYPE " "CONTENT-DESCRIPTION IN-REPLY-TO REPLY-TO LINES LIST-POST X-LABEL " "X-ORIGINAL-TO"; struct Progress progress; int retval = -1; bool evalhc = false; #ifdef USE_HCACHE char buf[LONG_STRING]; void *uid_validity = NULL; void *puidnext = NULL; unsigned int uidnext = 0; #endif /* USE_HCACHE */ struct Context *ctx = idata->ctx; if (mutt_bit_isset(idata->capabilities, IMAP4REV1)) { safe_asprintf(&hdrreq, "BODY.PEEK[HEADER.FIELDS (%s%s%s)]", want_headers, ImapHeaders ? " " : "", NONULL(ImapHeaders)); } else if (mutt_bit_isset(idata->capabilities, IMAP4)) { safe_asprintf(&hdrreq, "RFC822.HEADER.LINES (%s%s%s)", want_headers, ImapHeaders ? " " : "", NONULL(ImapHeaders)); } else { /* Unable to fetch headers for lower versions */ mutt_error(_("Unable to fetch headers from this IMAP server version.")); goto error_out_0; } /* instead of downloading all headers and then parsing them, we parse them * as they come in. 
*/ mutt_mktemp(tempfile, sizeof(tempfile)); fp = mutt_file_fopen(tempfile, "w+"); if (!fp) { mutt_error(_("Could not create temporary file %s"), tempfile); goto error_out_0; } unlink(tempfile); /* make sure context has room to hold the mailbox */ while (msn_end > ctx->hdrmax) mx_alloc_memory(ctx); alloc_msn_index(idata, msn_end); idx = ctx->msgcount; oldmsgcount = ctx->msgcount; idata->reopen &= ~(IMAP_REOPEN_ALLOW | IMAP_NEWMAIL_PENDING); idata->new_mail_count = 0; #ifdef USE_HCACHE idata->hcache = imap_hcache_open(idata, NULL); if (idata->hcache && (msn_begin == 1)) { uid_validity = mutt_hcache_fetch_raw(idata->hcache, "/UIDVALIDITY", 12); puidnext = mutt_hcache_fetch_raw(idata->hcache, "/UIDNEXT", 8); if (puidnext) { uidnext = *(unsigned int *) puidnext; mutt_hcache_free(idata->hcache, &puidnext); } if (uid_validity && uidnext && *(unsigned int *) uid_validity == idata->uid_validity) evalhc = true; mutt_hcache_free(idata->hcache, &uid_validity); } if (evalhc) { /* L10N: Comparing the cached data with the IMAP server's data */ mutt_progress_init(&progress, _("Evaluating cache..."), MUTT_PROGRESS_MSG, ReadInc, msn_end); snprintf(buf, sizeof(buf), "UID FETCH 1:%u (UID FLAGS)", uidnext - 1); imap_cmd_start(idata, buf); rc = IMAP_CMD_CONTINUE; for (msgno = 1; rc == IMAP_CMD_CONTINUE; msgno++) { mutt_progress_update(&progress, msgno, -1); memset(&h, 0, sizeof(h)); h.data = new_header_data(); do { rc = imap_cmd_step(idata); if (rc != IMAP_CMD_CONTINUE) break; mfhrc = msg_fetch_header(ctx, &h, idata->buf, NULL); if (mfhrc < 0) continue; if (!h.data->uid) { mutt_debug(2, "skipping hcache FETCH response for message number %d " "missing a UID\n", h.data->msn); continue; } if (h.data->msn < 1 || h.data->msn > msn_end) { mutt_debug(1, "skipping hcache FETCH response for unknown message number %d\n", h.data->msn); continue; } if (idata->msn_index[h.data->msn - 1]) { mutt_debug(2, "skipping hcache FETCH for duplicate message %d\n", h.data->msn); continue; } ctx->hdrs[idx] = imap_hcache_get(idata, h.data->uid); if (ctx->hdrs[idx]) { idata->max_msn = MAX(idata->max_msn, h.data->msn); idata->msn_index[h.data->msn - 1] = ctx->hdrs[idx]; ctx->hdrs[idx]->index = idx; /* messages which have not been expunged are ACTIVE (borrowed from mh * folders) */ ctx->hdrs[idx]->active = true; ctx->hdrs[idx]->read = h.data->read; ctx->hdrs[idx]->old = h.data->old; ctx->hdrs[idx]->deleted = h.data->deleted; ctx->hdrs[idx]->flagged = h.data->flagged; ctx->hdrs[idx]->replied = h.data->replied; ctx->hdrs[idx]->changed = h.data->changed; /* ctx->hdrs[msgno]->received is restored from mutt_hcache_restore */ ctx->hdrs[idx]->data = (void *) (h.data); STAILQ_INIT(&ctx->hdrs[idx]->tags); driver_tags_replace(&ctx->hdrs[idx]->tags, mutt_str_strdup(h.data->flags_remote)); ctx->msgcount++; ctx->size += ctx->hdrs[idx]->content->length; h.data = NULL; idx++; } } while (mfhrc == -1); imap_free_header_data(&h.data); if ((mfhrc < -1) || ((rc != IMAP_CMD_CONTINUE) && (rc != IMAP_CMD_OK))) { imap_hcache_close(idata); goto error_out_1; } } /* Look for the first empty MSN and start there */ while (msn_begin <= msn_end) { if (!idata->msn_index[msn_begin - 1]) break; msn_begin++; } } #endif /* USE_HCACHE */ mutt_progress_init(&progress, _("Fetching message headers..."), MUTT_PROGRESS_MSG, ReadInc, msn_end); while (msn_begin <= msn_end && fetch_msn_end < msn_end) { struct Buffer *b = mutt_buffer_new(); if (evalhc) { /* In case there are holes in the header cache. 
*/ evalhc = false; generate_seqset(b, idata, msn_begin, msn_end); } else mutt_buffer_printf(b, "%u:%u", msn_begin, msn_end); fetch_msn_end = msn_end; char *cmd = NULL; safe_asprintf(&cmd, "FETCH %s (UID FLAGS INTERNALDATE RFC822.SIZE %s)", b->data, hdrreq); imap_cmd_start(idata, cmd); FREE(&cmd); mutt_buffer_free(&b); rc = IMAP_CMD_CONTINUE; for (msgno = msn_begin; rc == IMAP_CMD_CONTINUE; msgno++) { mutt_progress_update(&progress, msgno, -1); rewind(fp); memset(&h, 0, sizeof(h)); h.data = new_header_data(); /* this DO loop does two things: * 1. handles untagged messages, so we can try again on the same msg * 2. fetches the tagged response at the end of the last message. */ do { rc = imap_cmd_step(idata); if (rc != IMAP_CMD_CONTINUE) break; mfhrc = msg_fetch_header(ctx, &h, idata->buf, fp); if (mfhrc < 0) continue; if (!ftello(fp)) { mutt_debug( 2, "msg_fetch_header: ignoring fetch response with no body\n"); continue; } /* make sure we don't get remnants from older larger message headers */ fputs("\n\n", fp); if (h.data->msn < 1 || h.data->msn > fetch_msn_end) { mutt_debug(1, "skipping FETCH response for unknown message number %d\n", h.data->msn); continue; } /* May receive FLAGS updates in a separate untagged response (#2935) */ if (idata->msn_index[h.data->msn - 1]) { mutt_debug(2, "skipping FETCH response for duplicate message %d\n", h.data->msn); continue; } ctx->hdrs[idx] = mutt_header_new(); idata->max_msn = MAX(idata->max_msn, h.data->msn); idata->msn_index[h.data->msn - 1] = ctx->hdrs[idx]; ctx->hdrs[idx]->index = idx; /* messages which have not been expunged are ACTIVE (borrowed from mh * folders) */ ctx->hdrs[idx]->active = true; ctx->hdrs[idx]->read = h.data->read; ctx->hdrs[idx]->old = h.data->old; ctx->hdrs[idx]->deleted = h.data->deleted; ctx->hdrs[idx]->flagged = h.data->flagged; ctx->hdrs[idx]->replied = h.data->replied; ctx->hdrs[idx]->changed = h.data->changed; ctx->hdrs[idx]->received = h.received; ctx->hdrs[idx]->data = (void *) (h.data); STAILQ_INIT(&ctx->hdrs[idx]->tags); driver_tags_replace(&ctx->hdrs[idx]->tags, mutt_str_strdup(h.data->flags_remote)); if (maxuid < h.data->uid) maxuid = h.data->uid; rewind(fp); /* NOTE: if Date: header is missing, mutt_rfc822_read_header depends * on h.received being set */ ctx->hdrs[idx]->env = mutt_rfc822_read_header(fp, ctx->hdrs[idx], 0, 0); /* content built as a side-effect of mutt_rfc822_read_header */ ctx->hdrs[idx]->content->length = h.content_length; ctx->size += h.content_length; #ifdef USE_HCACHE imap_hcache_put(idata, ctx->hdrs[idx]); #endif /* USE_HCACHE */ ctx->msgcount++; h.data = NULL; idx++; } while (mfhrc == -1); imap_free_header_data(&h.data); if ((mfhrc < -1) || ((rc != IMAP_CMD_CONTINUE) && (rc != IMAP_CMD_OK))) { #ifdef USE_HCACHE imap_hcache_close(idata); #endif goto error_out_1; } } /* In case we get new mail while fetching the headers. * * Note: The RFC says we shouldn't get any EXPUNGE responses in the * middle of a FETCH. But just to be cautious, use the current state * of max_msn, not fetch_msn_end to set the next start range. 
*/ if (idata->reopen & IMAP_NEWMAIL_PENDING) { /* update to the last value we actually pulled down */ fetch_msn_end = idata->max_msn; msn_begin = idata->max_msn + 1; msn_end = idata->new_mail_count; while (msn_end > ctx->hdrmax) mx_alloc_memory(ctx); alloc_msn_index(idata, msn_end); idata->reopen &= ~IMAP_NEWMAIL_PENDING; idata->new_mail_count = 0; } } if (maxuid && (status = imap_mboxcache_get(idata, idata->mailbox, 0)) && (status->uidnext < maxuid + 1)) { status->uidnext = maxuid + 1; } #ifdef USE_HCACHE mutt_hcache_store_raw(idata->hcache, "/UIDVALIDITY", 12, &idata->uid_validity, sizeof(idata->uid_validity)); if (maxuid && idata->uidnext < maxuid + 1) { mutt_debug(2, "Overriding UIDNEXT: %u -> %u\n", idata->uidnext, maxuid + 1); idata->uidnext = maxuid + 1; } if (idata->uidnext > 1) { mutt_hcache_store_raw(idata->hcache, "/UIDNEXT", 8, &idata->uidnext, sizeof(idata->uidnext)); } imap_hcache_close(idata); #endif /* USE_HCACHE */ if (ctx->msgcount > oldmsgcount) { /* TODO: it's not clear to me why we are calling mx_alloc_memory * yet again. */ mx_alloc_memory(ctx); mx_update_context(ctx, ctx->msgcount - oldmsgcount); update_context(idata, oldmsgcount); } idata->reopen |= IMAP_REOPEN_ALLOW; retval = msn_end; error_out_1: mutt_file_fclose(&fp); error_out_0: FREE(&hdrreq); return retval; } /** * imap_msg_open - Implements MxOps::msg_open() */ int imap_msg_open(struct Context *ctx, struct Message *msg, int msgno) { struct Envelope *newenv = NULL; char buf[LONG_STRING]; char path[PATH_MAX]; char *pc = NULL; unsigned int bytes; struct Progress progressbar; unsigned int uid; int cacheno; struct ImapCache *cache = NULL; bool retried = false; bool read; int rc; /* Sam's weird courier server returns an OK response even when FETCH * fails. Thanks Sam. */ bool fetched = false; int output_progress; struct ImapData *idata = ctx->data; struct Header *h = ctx->hdrs[msgno]; msg->fp = msg_cache_get(idata, h); if (msg->fp) { if (HEADER_DATA(h)->parsed) return 0; else goto parsemsg; } /* we still do some caching even if imap_cachedir is unset */ /* see if we already have the message in our cache */ cacheno = HEADER_DATA(h)->uid % IMAP_CACHE_LEN; cache = &idata->cache[cacheno]; if (cache->path) { /* don't treat cache errors as fatal, just fall back. */ if (cache->uid == HEADER_DATA(h)->uid && (msg->fp = fopen(cache->path, "r"))) return 0; else { unlink(cache->path); FREE(&cache->path); } } /* This function is called in a few places after endwin() * e.g. mutt_pipe_message(). */ output_progress = !isendwin(); if (output_progress) mutt_message(_("Fetching message...")); msg->fp = msg_cache_put(idata, h); if (!msg->fp) { cache->uid = HEADER_DATA(h)->uid; mutt_mktemp(path, sizeof(path)); cache->path = mutt_str_strdup(path); msg->fp = mutt_file_fopen(path, "w+"); if (!msg->fp) { FREE(&cache->path); return -1; } } /* mark this header as currently inactive so the command handler won't * also try to update it. HACK until all this code can be moved into the * command handler */ h->active = false; snprintf(buf, sizeof(buf), "UID FETCH %u %s", HEADER_DATA(h)->uid, (mutt_bit_isset(idata->capabilities, IMAP4REV1) ? (ImapPeek ? 
"BODY.PEEK[]" : "BODY[]") : "RFC822")); imap_cmd_start(idata, buf); do { rc = imap_cmd_step(idata); if (rc != IMAP_CMD_CONTINUE) break; pc = idata->buf; pc = imap_next_word(pc); pc = imap_next_word(pc); if (mutt_str_strncasecmp("FETCH", pc, 5) == 0) { while (*pc) { pc = imap_next_word(pc); if (pc[0] == '(') pc++; if (mutt_str_strncasecmp("UID", pc, 3) == 0) { pc = imap_next_word(pc); if (mutt_str_atoui(pc, &uid) < 0) goto bail; if (uid != HEADER_DATA(h)->uid) { mutt_error(_( "The message index is incorrect. Try reopening the mailbox.")); } } else if ((mutt_str_strncasecmp("RFC822", pc, 6) == 0) || (mutt_str_strncasecmp("BODY[]", pc, 6) == 0)) { pc = imap_next_word(pc); if (imap_get_literal_count(pc, &bytes) < 0) { imap_error("imap_msg_open()", buf); goto bail; } if (output_progress) { mutt_progress_init(&progressbar, _("Fetching message..."), MUTT_PROGRESS_SIZE, NetInc, bytes); } if (imap_read_literal(msg->fp, idata, bytes, output_progress ? &progressbar : NULL) < 0) { goto bail; } /* pick up trailing line */ rc = imap_cmd_step(idata); if (rc != IMAP_CMD_CONTINUE) goto bail; pc = idata->buf; fetched = true; } /* UW-IMAP will provide a FLAGS update here if the FETCH causes a * change (eg from \Unseen to \Seen). * Uncommitted changes in neomutt take precedence. If we decide to * incrementally update flags later, this won't stop us syncing */ else if ((mutt_str_strncasecmp("FLAGS", pc, 5) == 0) && !h->changed) { pc = imap_set_flags(idata, h, pc, NULL); if (!pc) goto bail; } } } } while (rc == IMAP_CMD_CONTINUE); /* see comment before command start. */ h->active = true; fflush(msg->fp); if (ferror(msg->fp)) { mutt_perror(cache->path); goto bail; } if (rc != IMAP_CMD_OK) goto bail; if (!fetched || !imap_code(idata->buf)) goto bail; msg_cache_commit(idata, h); parsemsg: /* Update the header information. Previously, we only downloaded a * portion of the headers, those required for the main display. */ rewind(msg->fp); /* It may be that the Status header indicates a message is read, but the * IMAP server doesn't know the message has been \Seen. So we capture * the server's notion of 'read' and if it differs from the message info * picked up in mutt_rfc822_read_header, we mark the message (and context * changed). Another possibility: ignore Status on IMAP? */ read = h->read; newenv = mutt_rfc822_read_header(msg->fp, h, 0, 0); mutt_env_merge(h->env, &newenv); /* see above. We want the new status in h->read, so we unset it manually * and let mutt_set_flag set it correctly, updating context. 
*/ if (read != h->read) { h->read = read; mutt_set_flag(ctx, h, MUTT_NEW, read); } h->lines = 0; fgets(buf, sizeof(buf), msg->fp); while (!feof(msg->fp)) { h->lines++; fgets(buf, sizeof(buf), msg->fp); } h->content->length = ftell(msg->fp) - h->content->offset; mutt_clear_error(); rewind(msg->fp); HEADER_DATA(h)->parsed = true; /* retry message parse if cached message is empty */ if (!retried && ((h->lines == 0) || (h->content->length == 0))) { imap_cache_del(idata, h); retried = true; goto parsemsg; } return 0; bail: mutt_file_fclose(&msg->fp); imap_cache_del(idata, h); if (cache->path) { unlink(cache->path); FREE(&cache->path); } return -1; } /** * imap_msg_close - Close an email * * @note May also return EOF Failure, see errno */ int imap_msg_close(struct Context *ctx, struct Message *msg) { return mutt_file_fclose(&msg->fp); } /** * imap_msg_commit - Implements MxOps::msg_commit() * * @note May also return EOF Failure, see errno */ int imap_msg_commit(struct Context *ctx, struct Message *msg) { int r = mutt_file_fclose(&msg->fp); if (r != 0) return r; return imap_append_message(ctx, msg); } /** * imap_append_message - Write an email back to the server * @param ctx Context * @param msg Message to save * @retval 0 Success * @retval -1 Failure */ int imap_append_message(struct Context *ctx, struct Message *msg) { FILE *fp = NULL; char buf[LONG_STRING]; char mbox[LONG_STRING]; char mailbox[LONG_STRING]; char internaldate[IMAP_DATELEN]; char imap_flags[SHORT_STRING]; size_t len; struct Progress progressbar; size_t sent; int c, last; struct ImapMbox mx; int rc; struct ImapData *idata = ctx->data; if (imap_parse_path(ctx->path, &mx)) return -1; imap_fix_path(idata, mx.mbox, mailbox, sizeof(mailbox)); if (!*mailbox) mutt_str_strfcpy(mailbox, "INBOX", sizeof(mailbox)); fp = fopen(msg->path, "r"); if (!fp) { mutt_perror(msg->path); goto fail; } /* currently we set the \Seen flag on all messages, but probably we * should scan the message Status header for flag info. Since we're * already rereading the whole file for length it isn't any more * expensive (it'd be nice if we had the file size passed in already * by the code that writes the file, but that's a lot of changes. * Ideally we'd have a Header structure with flag info here... 
*/ for (last = EOF, len = 0; (c = fgetc(fp)) != EOF; last = c) { if (c == '\n' && last != '\r') len++; len++; } rewind(fp); mutt_progress_init(&progressbar, _("Uploading message..."), MUTT_PROGRESS_SIZE, NetInc, len); imap_munge_mbox_name(idata, mbox, sizeof(mbox), mailbox); mutt_date_make_imap(internaldate, sizeof(internaldate), msg->received); imap_flags[0] = imap_flags[1] = 0; if (msg->flags.read) mutt_str_strcat(imap_flags, sizeof(imap_flags), " \\Seen"); if (msg->flags.replied) mutt_str_strcat(imap_flags, sizeof(imap_flags), " \\Answered"); if (msg->flags.flagged) mutt_str_strcat(imap_flags, sizeof(imap_flags), " \\Flagged"); if (msg->flags.draft) mutt_str_strcat(imap_flags, sizeof(imap_flags), " \\Draft"); snprintf(buf, sizeof(buf), "APPEND %s (%s) \"%s\" {%lu}", mbox, imap_flags + 1, internaldate, (unsigned long) len); imap_cmd_start(idata, buf); do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (rc != IMAP_CMD_RESPOND) { mutt_debug(1, "#1 command failed: %s\n", idata->buf); char *pc = idata->buf + SEQLEN; SKIPWS(pc); pc = imap_next_word(pc); mutt_error("%s", pc); mutt_file_fclose(&fp); goto fail; } for (last = EOF, sent = len = 0; (c = fgetc(fp)) != EOF; last = c) { if (c == '\n' && last != '\r') buf[len++] = '\r'; buf[len++] = c; if (len > sizeof(buf) - 3) { sent += len; flush_buffer(buf, &len, idata->conn); mutt_progress_update(&progressbar, sent, -1); } } if (len) flush_buffer(buf, &len, idata->conn); mutt_socket_send(idata->conn, "\r\n"); mutt_file_fclose(&fp); do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (!imap_code(idata->buf)) { mutt_debug(1, "#2 command failed: %s\n", idata->buf); char *pc = idata->buf + SEQLEN; SKIPWS(pc); pc = imap_next_word(pc); mutt_error("%s", pc); goto fail; } FREE(&mx.mbox); return 0; fail: FREE(&mx.mbox); return -1; } /** * imap_copy_messages - Server COPY messages to another folder * @param ctx Context * @param h Header of the email * @param dest Destination folder * @param delete Delete the original? * @retval -1 Error * @retval 0 Success * @retval 1 Non-fatal error - try fetch/append */ int imap_copy_messages(struct Context *ctx, struct Header *h, char *dest, int delete) { struct Buffer cmd, sync_cmd; char mbox[PATH_MAX]; char mmbox[PATH_MAX]; char prompt[PATH_MAX + 64]; int rc; struct ImapMbox mx; int err_continue = MUTT_NO; int triedcreate = 0; struct ImapData *idata = ctx->data; if (imap_parse_path(dest, &mx)) { mutt_debug(1, "bad destination %s\n", dest); return -1; } /* check that the save-to folder is in the same account */ if (mutt_account_match(&(idata->conn->account), &(mx.account)) == 0) { mutt_debug(3, "%s not same server as %s\n", dest, ctx->path); return 1; } if (h && h->attach_del) { mutt_debug(3, "#1 Message contains attachments to be deleted\n"); return 1; } imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox)); if (!*mbox) mutt_str_strfcpy(mbox, "INBOX", sizeof(mbox)); imap_munge_mbox_name(idata, mmbox, sizeof(mmbox), mbox); /* loop in case of TRYCREATE */ do { mutt_buffer_init(&sync_cmd); mutt_buffer_init(&cmd); /* Null Header* means copy tagged messages */ if (!h) { /* if any messages have attachments to delete, fall through to FETCH * and APPEND. TODO: Copy what we can with COPY, fall through for the * remainder. 
*/ for (int i = 0; i < ctx->msgcount; i++) { if (!message_is_tagged(ctx, i)) continue; if (ctx->hdrs[i]->attach_del) { mutt_debug(3, "#2 Message contains attachments to be deleted\n"); return 1; } if (ctx->hdrs[i]->active && ctx->hdrs[i]->changed) { rc = imap_sync_message_for_copy(idata, ctx->hdrs[i], &sync_cmd, &err_continue); if (rc < 0) { mutt_debug(1, "#1 could not sync\n"); goto out; } } } rc = imap_exec_msgset(idata, "UID COPY", mmbox, MUTT_TAG, 0, 0); if (!rc) { mutt_debug(1, "No messages tagged\n"); rc = -1; goto out; } else if (rc < 0) { mutt_debug(1, "#1 could not queue copy\n"); goto out; } else { mutt_message(ngettext("Copying %d message to %s...", "Copying %d messages to %s...", rc), rc, mbox); } } else { mutt_message(_("Copying message %d to %s..."), h->index + 1, mbox); mutt_buffer_printf(&cmd, "UID COPY %u %s", HEADER_DATA(h)->uid, mmbox); if (h->active && h->changed) { rc = imap_sync_message_for_copy(idata, h, &sync_cmd, &err_continue); if (rc < 0) { mutt_debug(1, "#2 could not sync\n"); goto out; } } rc = imap_exec(idata, cmd.data, IMAP_CMD_QUEUE); if (rc < 0) { mutt_debug(1, "#2 could not queue copy\n"); goto out; } } /* let's get it on */ rc = imap_exec(idata, NULL, IMAP_CMD_FAIL_OK); if (rc == -2) { if (triedcreate) { mutt_debug(1, "Already tried to create mailbox %s\n", mbox); break; } /* bail out if command failed for reasons other than nonexistent target */ if (mutt_str_strncasecmp(imap_get_qualifier(idata->buf), "[TRYCREATE]", 11) != 0) break; mutt_debug(3, "server suggests TRYCREATE\n"); snprintf(prompt, sizeof(prompt), _("Create %s?"), mbox); if (Confirmcreate && mutt_yesorno(prompt, 1) != MUTT_YES) { mutt_clear_error(); goto out; } if (imap_create_mailbox(idata, mbox) < 0) break; triedcreate = 1; } } while (rc == -2); if (rc != 0) { imap_error("imap_copy_messages", idata->buf); goto out; } /* cleanup */ if (delete) { if (!h) { for (int i = 0; i < ctx->msgcount; i++) { if (!message_is_tagged(ctx, i)) continue; mutt_set_flag(ctx, ctx->hdrs[i], MUTT_DELETE, 1); mutt_set_flag(ctx, ctx->hdrs[i], MUTT_PURGE, 1); if (DeleteUntag) mutt_set_flag(ctx, ctx->hdrs[i], MUTT_TAG, 0); } } else { mutt_set_flag(ctx, h, MUTT_DELETE, 1); mutt_set_flag(ctx, h, MUTT_PURGE, 1); if (DeleteUntag) mutt_set_flag(ctx, h, MUTT_TAG, 0); } } rc = 0; out: if (cmd.data) FREE(&cmd.data); if (sync_cmd.data) FREE(&sync_cmd.data); FREE(&mx.mbox); return (rc < 0) ? 
-1 : rc; } /** * imap_cache_del - Delete an email from the body cache * @param idata Server data * @param h Email header * @retval 0 Success * @retval -1 Failure */ int imap_cache_del(struct ImapData *idata, struct Header *h) { if (!idata || !h) return -1; idata->bcache = msg_cache_open(idata); char id[64]; snprintf(id, sizeof(id), "%u-%u", idata->uid_validity, HEADER_DATA(h)->uid); return mutt_bcache_del(idata->bcache, id); } /** * imap_cache_clean - Delete all the entries in the message cache * @param idata Server data * @retval 0 Always */ int imap_cache_clean(struct ImapData *idata) { idata->bcache = msg_cache_open(idata); mutt_bcache_list(idata->bcache, msg_cache_clean_cb, idata); return 0; } /** * imap_free_header_data - free ImapHeader structure * @param data Header data to free */ void imap_free_header_data(struct ImapHeaderData **data) { if (!data || !*data) return; /* this should be safe even if the list wasn't used */ FREE(&((*data)->flags_system)); FREE(&((*data)->flags_remote)); FREE(data); } /** * imap_set_flags - fill the message header according to the server flags * @param[in] idata Server data * @param[in] h Email Header * @param[in] s Command string * @param[out] server_changes Flags have changed * @retval ptr The end of flags string * @retval NULL Failure * * Expects a flags line of the form "FLAGS (flag flag ...)" * * imap_set_flags: fill out the message header according to the flags from * the server. Expects a flags line of the form "FLAGS (flag flag ...)" * * Sets server_changes to 1 if a change to a flag is made, or in the * case of h->changed, if a change to a flag _would_ have been * made. */ char *imap_set_flags(struct ImapData *idata, struct Header *h, char *s, int *server_changes) { struct Context *ctx = idata->ctx; struct ImapHeader newh = { 0 }; struct ImapHeaderData old_hd; bool readonly; int local_changes; local_changes = h->changed; struct ImapHeaderData *hd = h->data; newh.data = hd; memcpy(&old_hd, hd, sizeof(old_hd)); mutt_debug(2, "parsing FLAGS\n"); s = msg_parse_flags(&newh, s); if (!s) return NULL; /* Update tags system */ driver_tags_replace(&h->tags, mutt_str_strdup(hd->flags_remote)); /* YAUH (yet another ugly hack): temporarily set context to * read-write even if it's read-only, so *server* updates of * flags can be processed by mutt_set_flag. ctx->changed must * be restored afterwards */ readonly = ctx->readonly; ctx->readonly = false; /* This is redundant with the following two checks. Removing: * mutt_set_flag (ctx, h, MUTT_NEW, !(hd->read || hd->old)); */ set_changed_flag(ctx, h, local_changes, server_changes, MUTT_OLD, old_hd.old, hd->old, h->old); set_changed_flag(ctx, h, local_changes, server_changes, MUTT_READ, old_hd.read, hd->read, h->read); set_changed_flag(ctx, h, local_changes, server_changes, MUTT_DELETE, old_hd.deleted, hd->deleted, h->deleted); set_changed_flag(ctx, h, local_changes, server_changes, MUTT_FLAG, old_hd.flagged, hd->flagged, h->flagged); set_changed_flag(ctx, h, local_changes, server_changes, MUTT_REPLIED, old_hd.replied, hd->replied, h->replied); /* this message is now definitively *not* changed (mutt_set_flag * marks things changed as a side-effect) */ if (!local_changes) h->changed = false; ctx->changed &= !readonly; ctx->readonly = readonly; return s; }
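/*
 * Illustrative sketch (not part of the original file): imap_append_message()
 * above pre-counts the upload size by treating every bare LF as CRLF, so the
 * byte count announced in the APPEND literal ("{%lu}") matches what is
 * actually sent on the wire.  A minimal standalone version of that counting
 * pass might look like this; count_crlf_len() is a hypothetical helper name,
 * not a neomutt API.
 */
#include <stdio.h>

static unsigned long count_crlf_len(FILE *fp)
{
  int c;
  int last = EOF;
  unsigned long len = 0;

  for (; (c = fgetc(fp)) != EOF; last = c)
  {
    if (c == '\n' && last != '\r')
      len++; /* a CR will be inserted before this bare LF when sending */
    len++;
  }
  rewind(fp); /* the caller re-reads the file to do the actual send */
  return len;
}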
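/*
 * Illustrative sketch (not part of the original file): imap_set_flags()
 * above hands the server response to msg_parse_flags(), which the comment
 * says expects a line of the form "FLAGS (flag flag ...)".  A minimal
 * standalone tokenizer for that wire shape could look like this; it is a
 * sketch of the format only, not neomutt's actual parser.
 */
#include <stdio.h>
#include <string.h>

static void print_imap_flags(const char *s)
{
  const char *p = strchr(s, '(');
  if (!p)
    return;
  p++;
  while (*p && *p != ')')
  {
    const char *start = p;
    while (*p && *p != ' ' && *p != ')')
      p++; /* consume one flag token, e.g. "\Seen" */
    if (p > start)
      printf("flag: %.*s\n", (int) (p - start), start);
    while (*p == ' ')
      p++;
  }
}

/* print_imap_flags("FLAGS (\\Seen \\Answered)") prints \Seen and \Answered */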
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR L EEEEE % % R R L E % % RRRR L EEE % % R R L E % % R R LLLLL EEEEE % % % % % % Read URT RLE Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/pixel.h" #include "magick/property.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s R L E % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsRLE() returns MagickTrue if the image format type, identified by the % magick string, is RLE. % % The format of the ReadRLEImage method is: % % MagickBooleanType IsRLE(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % % */ static MagickBooleanType IsRLE(const unsigned char *magick,const size_t length) { if (length < 2) return(MagickFalse); if (memcmp(magick,"\122\314",2) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d R L E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadRLEImage() reads a run-length encoded Utah Raster Toolkit % image file and returns it. It allocates the memory necessary for the new % Image structure and returns a pointer to the new image. % % The format of the ReadRLEImage method is: % % Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% % */ static Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define SkipLinesOp 0x01 #define SetColorOp 0x02 #define SkipPixelsOp 0x03 #define ByteDataOp 0x05 #define RunDataOp 0x06 #define EOFOp 0x07 char magick[12]; Image *image; IndexPacket index; int opcode, operand, status; MagickStatusType flags; MagickSizeType number_pixels; MemoryInfo *pixel_info; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; register ssize_t i; register unsigned char *p; size_t bits_per_pixel, map_length, number_colormaps, number_planes, number_planes_filled, one, offset, pixel_info_length; ssize_t count, y; unsigned char background_color[256], *colormap, pixel, plane, *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Determine if this a RLE file. */ count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 2) || (memcmp(magick,"\122\314",2) != 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); do { /* Read image header. */ image->page.x=ReadBlobLSBShort(image); image->page.y=ReadBlobLSBShort(image); image->columns=ReadBlobLSBShort(image); image->rows=ReadBlobLSBShort(image); flags=(MagickStatusType) ReadBlobByte(image); image->matte=flags & 0x04 ? MagickTrue : MagickFalse; number_planes=(size_t) ReadBlobByte(image); bits_per_pixel=(size_t) ReadBlobByte(image); number_colormaps=(size_t) ReadBlobByte(image); map_length=(unsigned char) ReadBlobByte(image); if (map_length >= 32) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); one=1; map_length=one << map_length; if ((number_planes == 0) || (number_planes == 2) || ((flags & 0x04) && (number_colormaps > 254)) || (bits_per_pixel != 8) || (image->columns == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (flags & 0x02) { /* No background color-- initialize to black. */ for (i=0; i < (ssize_t) number_planes; i++) background_color[i]=0; (void) ReadBlobByte(image); } else { /* Initialize background color. */ p=background_color; for (i=0; i < (ssize_t) number_planes; i++) *p++=(unsigned char) ReadBlobByte(image); } if ((number_planes & 0x01) == 0) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } colormap=(unsigned char *) NULL; if (number_colormaps != 0) { /* Read image colormaps. */ colormap=(unsigned char *) AcquireQuantumMemory(number_colormaps, 3*map_length*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; for (i=0; i < (ssize_t) number_colormaps; i++) for (x=0; x < (ssize_t) map_length; x++) *p++=(unsigned char) ScaleShortToQuantum(ReadBlobLSBShort(image)); } if ((flags & 0x08) != 0) { char *comment; size_t length; /* Read image comment. 
*/ length=ReadBlobLSBShort(image); if (length != 0) { comment=(char *) AcquireQuantumMemory(length,sizeof(*comment)); if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,length-1,(unsigned char *) comment); comment[length-1]='\0'; (void) SetImageProperty(image,"comment",comment); comment=DestroyString(comment); if ((length & 0x01) == 0) (void) ReadBlobByte(image); } } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Allocate RLE pixels. */ if (image->matte != MagickFalse) number_planes++; number_pixels=(MagickSizeType) image->columns*image->rows; number_planes_filled=(number_planes % 2 == 0) ? number_planes : number_planes+1; if ((number_pixels*number_planes_filled) != (size_t) (number_pixels* number_planes_filled)) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory(image->columns,image->rows* MagickMax(number_planes_filled,4)*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info_length=image->columns*image->rows* MagickMax(number_planes_filled,4); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((flags & 0x01) && !(flags & 0x02)) { ssize_t j; /* Set background color. */ p=pixels; for (i=0; i < (ssize_t) number_pixels; i++) { if (image->matte == MagickFalse) for (j=0; j < (ssize_t) number_planes; j++) *p++=background_color[j]; else { for (j=0; j < (ssize_t) (number_planes-1); j++) *p++=background_color[j]; *p++=0; /* initialize matte channel */ } } } /* Read runlength-encoded image. 
*/ plane=0; x=0; y=0; opcode=ReadBlobByte(image); do { switch (opcode & 0x3f) { case SkipLinesOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); x=0; y+=operand; break; } case SetColorOp: { operand=ReadBlobByte(image); plane=(unsigned char) operand; if (plane == 255) plane=(unsigned char) (number_planes-1); x=0; break; } case SkipPixelsOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); x+=operand; break; } case ByteDataOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); offset=((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane; operand++; if (offset+((size_t) operand*number_planes) > pixel_info_length) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } p=pixels+offset; for (i=0; i < (ssize_t) operand; i++) { pixel=(unsigned char) ReadBlobByte(image); if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } if (operand & 0x01) (void) ReadBlobByte(image); x+=operand; break; } case RunDataOp: { operand=ReadBlobByte(image); if (opcode & 0x40) operand=ReadBlobLSBSignedShort(image); pixel=(unsigned char) ReadBlobByte(image); (void) ReadBlobByte(image); operand++; offset=((image->rows-y-1)*image->columns*number_planes)+x* number_planes+plane; p=pixels+offset; if (offset+((size_t) operand*number_planes) > pixel_info_length) { if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } for (i=0; i < (ssize_t) operand; i++) { if ((y < (ssize_t) image->rows) && ((x+i) < (ssize_t) image->columns)) *p=pixel; p+=number_planes; } x+=operand; break; } default: break; } opcode=ReadBlobByte(image); } while (((opcode & 0x3f) != EOFOp) && (opcode != EOF)); if (number_colormaps != 0) { MagickStatusType mask; /* Apply colormap affineation to image. */ mask=(MagickStatusType) (map_length-1); p=pixels; x=(ssize_t) number_planes; if (number_colormaps == 1) for (i=0; i < (ssize_t) number_pixels; i++) { if (IsValidColormapIndex(image,*p & mask,&index,exception) == MagickFalse) break; *p=colormap[(ssize_t) index]; p++; } else if ((number_planes >= 3) && (number_colormaps >= 3)) for (i=0; i < (ssize_t) number_pixels; i++) for (x=0; x < (ssize_t) number_planes; x++) { if (IsValidColormapIndex(image,(size_t) (x*map_length+ (*p & mask)),&index,exception) == MagickFalse) break; *p=colormap[(ssize_t) index]; p++; } if ((i < (ssize_t) number_pixels) || (x < (ssize_t) number_planes)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } } /* Initialize image structure. */ if (number_planes >= 3) { /* Convert raster image to DirectClass pixel packets. 
*/ p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(*p++)); SetPixelGreen(q,ScaleCharToQuantum(*p++)); SetPixelBlue(q,ScaleCharToQuantum(*p++)); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleCharToQuantum(*p++)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else { /* Create colormap. */ if (number_colormaps == 0) map_length=256; if (AcquireImageColormap(image,map_length) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); p=colormap; if (number_colormaps == 1) for (i=0; i < (ssize_t) image->colors; i++) { /* Pseudocolor. */ image->colormap[i].red=ScaleCharToQuantum((unsigned char) i); image->colormap[i].green=ScaleCharToQuantum((unsigned char) i); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i); } else if (number_colormaps > 1) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p); image->colormap[i].green=ScaleCharToQuantum(*(p+map_length)); image->colormap[i].blue=ScaleCharToQuantum(*(p+map_length*2)); p++; } p=pixels; if (image->matte == MagickFalse) { /* Convert raster image to PseudoClass pixel packets. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*p++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); } else { /* Image has a matte channel-- promote to DirectClass. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelRed(q,image->colormap[(ssize_t) index].red); if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelGreen(q,image->colormap[(ssize_t) index].green); if (IsValidColormapIndex(image,*p++,&index,exception) == MagickFalse) break; SetPixelBlue(q,image->colormap[(ssize_t) index].blue); SetPixelAlpha(q,ScaleCharToQuantum(*p++)); q++; } if (x < (ssize_t) image->columns) break; if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } image->colormap=(PixelPacket *) RelinquishMagickMemory( image->colormap); image->storage_class=DirectClass; image->colors=0; } } if (number_colormaps != 0) colormap=(unsigned char *) RelinquishMagickMemory(colormap); pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. 
*/ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; (void) ReadBlobByte(image); count=ReadBlob(image,2,(unsigned char *) magick); if ((count != 0) && (memcmp(magick,"\122\314",2) == 0)) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (memcmp(magick,"\122\314",2) == 0)); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r R L E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterRLEImage() adds attributes for the RLE image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterRLEImage method is: % % size_t RegisterRLEImage(void) % */ ModuleExport size_t RegisterRLEImage(void) { MagickInfo *entry; entry=SetMagickInfo("RLE"); entry->decoder=(DecodeImageHandler *) ReadRLEImage; entry->magick=(IsImageFormatHandler *) IsRLE; entry->adjoin=MagickFalse; entry->description=ConstantString("Utah Run length encoded image"); entry->module=ConstantString("RLE"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r R L E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterRLEImage() removes format registrations made by the % RLE module from the list of supported formats. % % The format of the UnregisterRLEImage method is: % % UnregisterRLEImage(void) % */ ModuleExport void UnregisterRLEImage(void) { (void) UnregisterMagickInfo("RLE"); }
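/*
 * Illustrative sketch (not part of the original file): ReadRLEImage() above
 * stores the raster bottom-up with one interleaved byte per plane per pixel,
 * so the ByteDataOp/RunDataOp handlers locate a sample with the offset
 * arithmetic below, then refuse any run that would write past the pixel
 * buffer (this also catches huge size_t offsets produced by hostile x/y
 * operands).  The names RleGeometry, rle_offset and rle_run_fits are
 * hypothetical, introduced only for this sketch.
 */
#include <stddef.h>

typedef struct
{
  size_t rows, columns, planes; /* raster geometry */
  size_t buffer_length;         /* total bytes in the pixel buffer */
} RleGeometry;

/* Byte offset of (x, y, plane) in a bottom-up, plane-interleaved raster. */
static size_t rle_offset(const RleGeometry *g, size_t x, size_t y, size_t plane)
{
  return ((g->rows - y - 1) * g->columns * g->planes) + x * g->planes + plane;
}

/* Guard applied before writing a run of `count` samples at `offset`;
   mirrors the "offset + operand*number_planes > pixel_info_length" check. */
static int rle_run_fits(const RleGeometry *g, size_t offset, size_t count)
{
  return (offset + count * g->planes) <= g->buffer_length;
}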
/** * @file * IMAP helper functions * * @authors * Copyright (C) 1996-1998,2010,2012-2013 Michael R. Elkins <me@mutt.org> * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net> * Copyright (C) 1999-2009,2012 Brendan Cully <brendan@kublai.com> * * @copyright * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation, either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @page imap_util IMAP helper functions * * IMAP helper functions */ #include "config.h" #include <ctype.h> #include <errno.h> #include <netdb.h> #include <netinet/in.h> #include <signal.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include "imap_private.h" #include "mutt/mutt.h" #include "conn/conn.h" #include "bcache.h" #include "context.h" #include "globals.h" #include "header.h" #include "imap/imap.h" #include "mailbox.h" #include "message.h" #include "mutt_account.h" #include "mutt_socket.h" #include "mx.h" #include "options.h" #include "protos.h" #include "url.h" #ifdef USE_HCACHE #include "hcache/hcache.h" #endif /** * imap_expand_path - Canonicalise an IMAP path * @param path Buffer containing path * @param len Buffer length * @retval 0 Success * @retval -1 Error * * IMAP implementation of mutt_expand_path. Rewrite an IMAP path in canonical * and absolute form. The buffer is rewritten in place with the canonical IMAP * path. * * Function can fail if imap_parse_path() or url_tostring() fail, * of if the buffer isn't large enough. */ int imap_expand_path(char *path, size_t len) { struct ImapMbox mx; struct ImapData *idata = NULL; struct Url url; char fixedpath[LONG_STRING]; int rc; if (imap_parse_path(path, &mx) < 0) return -1; idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW); mutt_account_tourl(&mx.account, &url); imap_fix_path(idata, mx.mbox, fixedpath, sizeof(fixedpath)); url.path = fixedpath; rc = url_tostring(&url, path, len, U_DECODE_PASSWD); FREE(&mx.mbox); return rc; } /** * imap_get_parent - Get an IMAP folder's parent * @param output Buffer for the result * @param mbox Mailbox whose parent is to be determined * @param olen Length of the buffer * @param delim Path delimiter */ void imap_get_parent(char *output, const char *mbox, size_t olen, char delim) { int n; /* Make a copy of the mailbox name, but only if the pointers are different */ if (mbox != output) mutt_str_strfcpy(output, mbox, olen); n = mutt_str_strlen(output); /* Let's go backwards until the next delimiter * * If output[n] is a '/', the first n-- will allow us * to ignore it. If it isn't, then output looks like * "/aaaaa/bbbb". There is at least one "b", so we can't skip * the "/" after the 'a's. * * If output == '/', then n-- => n == 0, so the loop ends * immediately */ for (n--; n >= 0 && output[n] != delim; n--) ; /* We stopped before the beginning. There is a trailing * slash. */ if (n > 0) { /* Strip the trailing delimiter. */ output[n] = '\0'; } else { output[0] = (n == 0) ? 
delim : '\0'; } } /** * imap_get_parent_path - Get the path of the parent folder * @param output Buffer for the result * @param path Mailbox whose parent is to be determined * @param olen Length of the buffer * * Provided an imap path, returns in output the parent directory if * existent. Else returns the same path. */ void imap_get_parent_path(char *output, const char *path, size_t olen) { struct ImapMbox mx; struct ImapData *idata = NULL; char mbox[LONG_STRING] = ""; if (imap_parse_path(path, &mx) < 0) { mutt_str_strfcpy(output, path, olen); return; } idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW); if (!idata) { mutt_str_strfcpy(output, path, olen); return; } /* Stores a fixed path in mbox */ imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox)); /* Gets the parent mbox in mbox */ imap_get_parent(mbox, mbox, sizeof(mbox), idata->delim); /* Returns a fully qualified IMAP url */ imap_qualify_path(output, olen, &mx, mbox); FREE(&mx.mbox); } /** * imap_clean_path - Cleans an IMAP path using imap_fix_path * @param path Path to be cleaned * @param plen Length of the buffer * * Does it in place. */ void imap_clean_path(char *path, size_t plen) { struct ImapMbox mx; struct ImapData *idata = NULL; char mbox[LONG_STRING] = ""; if (imap_parse_path(path, &mx) < 0) return; idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW); if (!idata) return; /* Stores a fixed path in mbox */ imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox)); /* Returns a fully qualified IMAP url */ imap_qualify_path(path, plen, &mx, mbox); } #ifdef USE_HCACHE /** * imap_hcache_namer - Generate a filename for the header cache * @param path Path for the header cache file * @param dest Buffer for result * @param dlen Length of buffer * @retval num Chars written to dest */ static int imap_hcache_namer(const char *path, char *dest, size_t dlen) { return snprintf(dest, dlen, "%s.hcache", path); } /** * imap_hcache_open - Open a header cache * @param idata Server data * @param path Path to the header cache * @retval ptr HeaderCache * @retval NULL Failure */ header_cache_t *imap_hcache_open(struct ImapData *idata, const char *path) { struct ImapMbox mx; struct Url url; char cachepath[PATH_MAX]; char mbox[PATH_MAX]; if (path) imap_cachepath(idata, path, mbox, sizeof(mbox)); else { if (!idata->ctx || imap_parse_path(idata->ctx->path, &mx) < 0) return NULL; imap_cachepath(idata, mx.mbox, mbox, sizeof(mbox)); FREE(&mx.mbox); } if (strstr(mbox, "/../") || (strcmp(mbox, "..") == 0) || (strncmp(mbox, "../", 3) == 0)) return NULL; size_t len = strlen(mbox); if ((len > 3) && (strcmp(mbox + len - 3, "/..") == 0)) return NULL; mutt_account_tourl(&idata->conn->account, &url); url.path = mbox; url_tostring(&url, cachepath, sizeof(cachepath), U_PATH); return mutt_hcache_open(HeaderCache, cachepath, imap_hcache_namer); } /** * imap_hcache_close - Close the header cache * @param idata Server data */ void imap_hcache_close(struct ImapData *idata) { if (!idata->hcache) return; mutt_hcache_close(idata->hcache); idata->hcache = NULL; } /** * imap_hcache_get - Get a header cache entry by its UID * @param idata Server data * @param uid UID to find * @retval ptr Email Header * @retval NULL Failure */ struct Header *imap_hcache_get(struct ImapData *idata, unsigned int uid) { char key[16]; void *uv = NULL; struct Header *h = NULL; if (!idata->hcache) return NULL; sprintf(key, "/%u", uid); uv = mutt_hcache_fetch(idata->hcache, key, imap_hcache_keylen(key)); if (uv) { if (*(unsigned int *) uv == idata->uid_validity) h = mutt_hcache_restore(uv); else 
mutt_debug(3, "hcache uidvalidity mismatch: %u\n", *(unsigned int *) uv); mutt_hcache_free(idata->hcache, &uv); } return h; } /** * imap_hcache_put - Add an entry to the header cache * @param idata Server data * @param h Email Header * @retval 0 Success * @retval -1 Failure */ int imap_hcache_put(struct ImapData *idata, struct Header *h) { char key[16]; if (!idata->hcache) return -1; sprintf(key, "/%u", HEADER_DATA(h)->uid); return mutt_hcache_store(idata->hcache, key, imap_hcache_keylen(key), h, idata->uid_validity); } /** * imap_hcache_del - Delete an item from the header cache * @param idata Server data * @param uid UID of entry to delete * @retval 0 Success * @retval -1 Failure */ int imap_hcache_del(struct ImapData *idata, unsigned int uid) { char key[16]; if (!idata->hcache) return -1; sprintf(key, "/%u", uid); return mutt_hcache_delete(idata->hcache, key, imap_hcache_keylen(key)); } #endif /** * imap_parse_path - Parse an IMAP mailbox name into name,host,port * @param path Mailbox path to parse * @param mx An IMAP mailbox * @retval 0 Success * @retval -1 Failure * * Given an IMAP mailbox name, return host, port and a path IMAP servers will * recognize. mx.mbox is malloc'd, caller must free it */ int imap_parse_path(const char *path, struct ImapMbox *mx) { static unsigned short ImapPort = 0; static unsigned short ImapsPort = 0; struct servent *service = NULL; struct Url url; char *c = NULL; if (!ImapPort) { service = getservbyname("imap", "tcp"); if (service) ImapPort = ntohs(service->s_port); else ImapPort = IMAP_PORT; mutt_debug(3, "Using default IMAP port %d\n", ImapPort); } if (!ImapsPort) { service = getservbyname("imaps", "tcp"); if (service) ImapsPort = ntohs(service->s_port); else ImapsPort = IMAP_SSL_PORT; mutt_debug(3, "Using default IMAPS port %d\n", ImapsPort); } /* Defaults */ memset(&mx->account, 0, sizeof(mx->account)); mx->account.port = ImapPort; mx->account.type = MUTT_ACCT_TYPE_IMAP; c = mutt_str_strdup(path); url_parse(&url, c); if (url.scheme == U_IMAP || url.scheme == U_IMAPS) { if (mutt_account_fromurl(&mx->account, &url) < 0 || !*mx->account.host) { url_free(&url); FREE(&c); return -1; } mx->mbox = mutt_str_strdup(url.path); if (url.scheme == U_IMAPS) mx->account.flags |= MUTT_ACCT_SSL; url_free(&url); FREE(&c); } /* old PINE-compatibility code */ else { url_free(&url); FREE(&c); char tmp[128]; if (sscanf(path, "{%127[^}]}", tmp) != 1) return -1; c = strchr(path, '}'); if (!c) return -1; else { /* walk past closing '}' */ mx->mbox = mutt_str_strdup(c + 1); } c = strrchr(tmp, '@'); if (c) { *c = '\0'; mutt_str_strfcpy(mx->account.user, tmp, sizeof(mx->account.user)); mutt_str_strfcpy(tmp, c + 1, sizeof(tmp)); mx->account.flags |= MUTT_ACCT_USER; } const int n = sscanf(tmp, "%127[^:/]%127s", mx->account.host, tmp); if (n < 1) { mutt_debug(1, "NULL host in %s\n", path); FREE(&mx->mbox); return -1; } if (n > 1) { if (sscanf(tmp, ":%hu%127s", &(mx->account.port), tmp) >= 1) mx->account.flags |= MUTT_ACCT_PORT; if (sscanf(tmp, "/%s", tmp) == 1) { if (mutt_str_strncmp(tmp, "ssl", 3) == 0) mx->account.flags |= MUTT_ACCT_SSL; else { mutt_debug(1, "Unknown connection type in %s\n", path); FREE(&mx->mbox); return -1; } } } } if ((mx->account.flags & MUTT_ACCT_SSL) && !(mx->account.flags & MUTT_ACCT_PORT)) mx->account.port = ImapsPort; return 0; } /** * imap_mxcmp - Compare mailbox names, giving priority to INBOX * @param mx1 First mailbox name * @param mx2 Second mailbox name * @retval <0 First mailbox precedes Second mailbox * @retval 0 Mailboxes are the same * @retval 
>0 Second mailbox precedes First mailbox * * Like a normal sort function except that "INBOX" will be sorted to the * beginning of the list. */ int imap_mxcmp(const char *mx1, const char *mx2) { char *b1 = NULL; char *b2 = NULL; int rc; if (!mx1 || !*mx1) mx1 = "INBOX"; if (!mx2 || !*mx2) mx2 = "INBOX"; if ((mutt_str_strcasecmp(mx1, "INBOX") == 0) && (mutt_str_strcasecmp(mx2, "INBOX") == 0)) { return 0; } b1 = mutt_mem_malloc(strlen(mx1) + 1); b2 = mutt_mem_malloc(strlen(mx2) + 1); imap_fix_path(NULL, mx1, b1, strlen(mx1) + 1); imap_fix_path(NULL, mx2, b2, strlen(mx2) + 1); rc = mutt_str_strcmp(b1, b2); FREE(&b1); FREE(&b2); return rc; } /** * imap_pretty_mailbox - Prettify an IMAP mailbox name * @param path Mailbox name to be tidied * * Called by mutt_pretty_mailbox() to make IMAP paths look nice. */ void imap_pretty_mailbox(char *path) { struct ImapMbox home, target; struct Url url; char *delim = NULL; int tlen; int hlen = 0; bool home_match = false; if (imap_parse_path(path, &target) < 0) return; tlen = mutt_str_strlen(target.mbox); /* check whether we can do '=' substitution */ if (mx_is_imap(Folder) && !imap_parse_path(Folder, &home)) { hlen = mutt_str_strlen(home.mbox); if (tlen && mutt_account_match(&home.account, &target.account) && (mutt_str_strncmp(home.mbox, target.mbox, hlen) == 0)) { if (hlen == 0) home_match = true; else if (ImapDelimChars) { for (delim = ImapDelimChars; *delim != '\0'; delim++) if (target.mbox[hlen] == *delim) home_match = true; } } FREE(&home.mbox); } /* do the '=' substitution */ if (home_match) { *path++ = '='; /* copy remaining path, skipping delimiter */ if (hlen == 0) hlen = -1; memcpy(path, target.mbox + hlen + 1, tlen - hlen - 1); path[tlen - hlen - 1] = '\0'; } else { mutt_account_tourl(&target.account, &url); url.path = target.mbox; /* FIXME: That hard-coded constant is bogus. But we need the actual * size of the buffer from mutt_pretty_mailbox. And these pretty * operations usually shrink the result. Still... 
*/ url_tostring(&url, path, 1024, 0); } FREE(&target.mbox); } /** * imap_continue - display a message and ask the user if they want to go on * @param msg Location of the error * @param resp Message for user * @retval num Result: #MUTT_YES, #MUTT_NO, #MUTT_ABORT */ int imap_continue(const char *msg, const char *resp) { imap_error(msg, resp); return mutt_yesorno(_("Continue?"), 0); } /** * imap_error - show an error and abort * @param where Location of the error * @param msg Message for user */ void imap_error(const char *where, const char *msg) { mutt_error("%s [%s]\n", where, msg); } /** * imap_new_idata - Allocate and initialise a new ImapData structure * @retval NULL Failure (no mem) * @retval ptr New ImapData */ struct ImapData *imap_new_idata(void) { struct ImapData *idata = mutt_mem_calloc(1, sizeof(struct ImapData)); idata->cmdbuf = mutt_buffer_new(); idata->cmdslots = ImapPipelineDepth + 2; idata->cmds = mutt_mem_calloc(idata->cmdslots, sizeof(*idata->cmds)); STAILQ_INIT(&idata->flags); STAILQ_INIT(&idata->mboxcache); return idata; } /** * imap_free_idata - Release and clear storage in an ImapData structure * @param idata Server data */ void imap_free_idata(struct ImapData **idata) { if (!idata) return; FREE(&(*idata)->capstr); mutt_list_free(&(*idata)->flags); imap_mboxcache_free(*idata); mutt_buffer_free(&(*idata)->cmdbuf); FREE(&(*idata)->buf); mutt_bcache_close(&(*idata)->bcache); FREE(&(*idata)->cmds); FREE(idata); } /** * imap_fix_path - Fix up the imap path * @param idata Server data * @param mailbox Mailbox path * @param path Buffer for the result * @param plen Length of buffer * @retval ptr Fixed-up path * * This is necessary because the rest of neomutt assumes a hierarchy delimiter of * '/', which is not necessarily true in IMAP. Additionally, the filesystem * converts multiple hierarchy delimiters into a single one, ie "///" is equal * to "/". IMAP servers are not required to do this. * Moreover, IMAP servers may dislike the path ending with the delimiter. */ char *imap_fix_path(struct ImapData *idata, const char *mailbox, char *path, size_t plen) { int i = 0; char delim = '\0'; if (idata) delim = idata->delim; while (mailbox && *mailbox && i < plen - 1) { if ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim)) { /* use connection delimiter if known. 
Otherwise use user delimiter */ if (!idata) delim = *mailbox; while (*mailbox && ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim))) { mailbox++; } path[i] = delim; } else { path[i] = *mailbox; mailbox++; } i++; } if (i && path[--i] != delim) i++; path[i] = '\0'; return path; } /** * imap_cachepath - Generate a cache path for a mailbox * @param idata Server data * @param mailbox Mailbox name * @param dest Buffer to store cache path * @param dlen Length of buffer */ void imap_cachepath(struct ImapData *idata, const char *mailbox, char *dest, size_t dlen) { char *s = NULL; const char *p = mailbox; for (s = dest; p && *p && dlen; dlen--) { if (*p == idata->delim) { *s = '/'; /* simple way to avoid collisions with UIDs */ if (*(p + 1) >= '0' && *(p + 1) <= '9') { if (--dlen) *++s = '_'; } } else *s = *p; p++; s++; } *s = '\0'; } /** * imap_get_literal_count - write number of bytes in an IMAP literal into bytes * @param[in] buf Number as a string * @param[out] bytes Resulting number * @retval 0 Success * @retval -1 Failure */ int imap_get_literal_count(const char *buf, unsigned int *bytes) { char *pc = NULL; char *pn = NULL; if (!buf || !(pc = strchr(buf, '{'))) return -1; pc++; pn = pc; while (isdigit((unsigned char) *pc)) pc++; *pc = '\0'; if (mutt_str_atoui(pn, bytes) < 0) return -1; return 0; } /** * imap_get_qualifier - Get the qualifier from a tagged response * @param buf Command string to process * @retval ptr Start of the qualifier * * In a tagged response, skip tag and status for the qualifier message. * Used by imap_copy_message for TRYCREATE */ char *imap_get_qualifier(char *buf) { char *s = buf; /* skip tag */ s = imap_next_word(s); /* skip OK/NO/BAD response */ s = imap_next_word(s); return s; } /** * imap_next_word - Find where the next IMAP word begins * @param s Command string to process * @retval ptr Next IMAP word */ char *imap_next_word(char *s) { int quoted = 0; while (*s) { if (*s == '\\') { s++; if (*s) s++; continue; } if (*s == '\"') quoted = quoted ? 0 : 1; if (!quoted && ISSPACE(*s)) break; s++; } SKIPWS(s); return s; } /** * imap_qualify_path - Make an absolute IMAP folder target * @param dest Buffer for the result * @param len Length of buffer * @param mx Imap mailbox * @param path Path relative to the mailbox * * given ImapMbox and relative path. 
*/ void imap_qualify_path(char *dest, size_t len, struct ImapMbox *mx, char *path) { struct Url url; mutt_account_tourl(&mx->account, &url); url.path = path; url_tostring(&url, dest, len, 0); } /** * imap_quote_string - quote string according to IMAP rules * @param dest Buffer for the result * @param dlen Length of the buffer * @param src String to be quoted * * Surround string with quotes, escape " and \ with backslash */ void imap_quote_string(char *dest, size_t dlen, const char *src, bool quote_backtick) { const char *quote = "`\"\\"; if (!quote_backtick) quote++; char *pt = dest; const char *s = src; *pt++ = '"'; /* save room for quote-chars */ dlen -= 3; for (; *s && dlen; s++) { if (strchr(quote, *s)) { if (dlen < 2) break; dlen -= 2; *pt++ = '\\'; *pt++ = *s; } else { *pt++ = *s; dlen--; } } *pt++ = '"'; *pt = '\0'; } /** * imap_unquote_string - equally stupid unquoting routine * @param s String to be unquoted */ void imap_unquote_string(char *s) { char *d = s; if (*s == '\"') s++; else return; while (*s) { if (*s == '\"') { *d = '\0'; return; } if (*s == '\\') { s++; } if (*s) { *d = *s; d++; s++; } } *d = '\0'; } /** * imap_munge_mbox_name - Quote awkward characters in a mailbox name * @param idata Server data * @param dest Buffer to store safe mailbox name * @param dlen Length of buffer * @param src Mailbox name */ void imap_munge_mbox_name(struct ImapData *idata, char *dest, size_t dlen, const char *src) { char *buf = mutt_str_strdup(src); imap_utf_encode(idata, &buf); imap_quote_string(dest, dlen, buf, false); FREE(&buf); } /** * imap_unmunge_mbox_name - Remove quoting from a mailbox name * @param idata Server data * @param s Mailbox name * * The string will be altered in-place. */ void imap_unmunge_mbox_name(struct ImapData *idata, char *s) { imap_unquote_string(s); char *buf = mutt_str_strdup(s); if (buf) { imap_utf_decode(idata, &buf); strncpy(s, buf, strlen(s)); } FREE(&buf); } /** * imap_keepalive - poll the current folder to keep the connection alive */ void imap_keepalive(void) { struct Connection *conn = NULL; struct ImapData *idata = NULL; time_t now = time(NULL); TAILQ_FOREACH(conn, mutt_socket_head(), entries) { if (conn->account.type == MUTT_ACCT_TYPE_IMAP) { idata = conn->data; if (idata->state >= IMAP_AUTHENTICATED && now >= idata->lastread + ImapKeepalive) { imap_check(idata, 1); } } } } /** * imap_wait_keepalive - Wait for a process to change state * @param pid Process ID to listen to * @retval num 'wstatus' from waitpid() */ int imap_wait_keepalive(pid_t pid) { struct sigaction oldalrm; struct sigaction act; sigset_t oldmask; int rc; bool imap_passive = ImapPassive; ImapPassive = true; OptKeepQuiet = true; sigprocmask(SIG_SETMASK, NULL, &oldmask); sigemptyset(&act.sa_mask); act.sa_handler = mutt_sig_empty_handler; #ifdef SA_INTERRUPT act.sa_flags = SA_INTERRUPT; #else act.sa_flags = 0; #endif sigaction(SIGALRM, &act, &oldalrm); alarm(ImapKeepalive); while (waitpid(pid, &rc, 0) < 0 && errno == EINTR) { alarm(0); /* cancel a possibly pending alarm */ imap_keepalive(); alarm(ImapKeepalive); } alarm(0); /* cancel a possibly pending alarm */ sigaction(SIGALRM, &oldalrm, NULL); sigprocmask(SIG_SETMASK, &oldmask, NULL); OptKeepQuiet = false; if (!imap_passive) ImapPassive = false; return rc; } /** * imap_allow_reopen - Allow re-opening a folder upon expunge * @param ctx Context */ void imap_allow_reopen(struct Context *ctx) { struct ImapData *idata = NULL; if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP) return; idata = ctx->data; if (idata->ctx == ctx) 
idata->reopen |= IMAP_REOPEN_ALLOW; } /** * imap_disallow_reopen - Disallow re-opening a folder upon expunge * @param ctx Context */ void imap_disallow_reopen(struct Context *ctx) { struct ImapData *idata = NULL; if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP) return; idata = ctx->data; if (idata->ctx == ctx) idata->reopen &= ~IMAP_REOPEN_ALLOW; } /** * imap_account_match - Compare two Accounts * @param a1 First Account * @param a2 Second Account * @retval true Accounts match */ int imap_account_match(const struct Account *a1, const struct Account *a2) { struct ImapData *a1_idata = imap_conn_find(a1, MUTT_IMAP_CONN_NONEW); struct ImapData *a2_idata = imap_conn_find(a2, MUTT_IMAP_CONN_NONEW); const struct Account *a1_canon = a1_idata == NULL ? a1 : &a1_idata->conn->account; const struct Account *a2_canon = a2_idata == NULL ? a2 : &a2_idata->conn->account; return mutt_account_match(a1_canon, a2_canon); }
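/*
 * Illustrative sketch (not part of the original file): imap_fix_path() above
 * collapses runs of hierarchy delimiters ("a///b" -> "a/b") and strips a
 * trailing delimiter, since IMAP servers are not required to normalize paths
 * the way a filesystem does.  A minimal single-delimiter version of the same
 * idea, under a hypothetical name:
 */
#include <string.h>

static void collapse_delim(char *path, char delim)
{
  char *src = path;
  char *dst = path;

  while (*src)
  {
    *dst++ = *src;
    if (*src == delim)
      while (*src == delim) /* skip the remainder of a delimiter run */
        src++;
    else
      src++;
  }
  if (dst > path && dst[-1] == delim) /* drop a trailing delimiter */
    dst--;
  *dst = '\0';
}

/* collapse_delim() applied to "INBOX///Sent/" with '/' leaves "INBOX/Sent" */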
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % GGGG IIIII FFFFF % % G I F % % G GG I FFF % % G G I F % % GGG IIIII F % % % % % % Read/Write Compuserv Graphics Interchange Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/profile.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" /* Define declarations. */ #define MaximumLZWBits 12 #define MaximumLZWCode (1UL << MaximumLZWBits) /* Typdef declarations. */ typedef struct _LZWCodeInfo { unsigned char buffer[280]; size_t count, bit; MagickBooleanType eof; } LZWCodeInfo; typedef struct _LZWStack { size_t *codes, *index, *top; } LZWStack; typedef struct _LZWInfo { Image *image; LZWStack *stack; MagickBooleanType genesis; size_t data_size, maximum_data_value, clear_code, end_code, bits, first_code, last_code, maximum_code, slot, *table[2]; LZWCodeInfo code_info; } LZWInfo; /* Forward declarations. */ static inline int GetNextLZWCode(LZWInfo *,const size_t); static MagickBooleanType WriteGIFImage(const ImageInfo *,Image *); static ssize_t ReadBlobBlock(Image *,unsigned char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e c o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DecodeImage uncompresses an image via GIF-coding. % % The format of the DecodeImage method is: % % MagickBooleanType DecodeImage(Image *image,const ssize_t opacity) % % A description of each parameter follows: % % o image: the address of a structure of type Image. % % o opacity: The colormap index associated with the transparent color. 
% */ static LZWInfo *RelinquishLZWInfo(LZWInfo *lzw_info) { if (lzw_info->table[0] != (size_t *) NULL) lzw_info->table[0]=(size_t *) RelinquishMagickMemory( lzw_info->table[0]); if (lzw_info->table[1] != (size_t *) NULL) lzw_info->table[1]=(size_t *) RelinquishMagickMemory( lzw_info->table[1]); if (lzw_info->stack != (LZWStack *) NULL) { if (lzw_info->stack->codes != (size_t *) NULL) lzw_info->stack->codes=(size_t *) RelinquishMagickMemory( lzw_info->stack->codes); lzw_info->stack=(LZWStack *) RelinquishMagickMemory(lzw_info->stack); } lzw_info=(LZWInfo *) RelinquishMagickMemory(lzw_info); return((LZWInfo *) NULL); } static inline void ResetLZWInfo(LZWInfo *lzw_info) { size_t one; lzw_info->bits=lzw_info->data_size+1; one=1; lzw_info->maximum_code=one << lzw_info->bits; lzw_info->slot=lzw_info->maximum_data_value+3; lzw_info->genesis=MagickTrue; } static LZWInfo *AcquireLZWInfo(Image *image,const size_t data_size) { LZWInfo *lzw_info; register ssize_t i; size_t one; lzw_info=(LZWInfo *) AcquireMagickMemory(sizeof(*lzw_info)); if (lzw_info == (LZWInfo *) NULL) return((LZWInfo *) NULL); (void) ResetMagickMemory(lzw_info,0,sizeof(*lzw_info)); lzw_info->image=image; lzw_info->data_size=data_size; one=1; lzw_info->maximum_data_value=(one << data_size)-1; lzw_info->clear_code=lzw_info->maximum_data_value+1; lzw_info->end_code=lzw_info->maximum_data_value+2; lzw_info->table[0]=(size_t *) AcquireQuantumMemory(MaximumLZWCode, sizeof(*lzw_info->table)); lzw_info->table[1]=(size_t *) AcquireQuantumMemory(MaximumLZWCode, sizeof(*lzw_info->table)); if ((lzw_info->table[0] == (size_t *) NULL) || (lzw_info->table[1] == (size_t *) NULL)) { lzw_info=RelinquishLZWInfo(lzw_info); return((LZWInfo *) NULL); } for (i=0; i <= (ssize_t) lzw_info->maximum_data_value; i++) { lzw_info->table[0][i]=0; lzw_info->table[1][i]=(size_t) i; } ResetLZWInfo(lzw_info); lzw_info->code_info.buffer[0]='\0'; lzw_info->code_info.buffer[1]='\0'; lzw_info->code_info.count=2; lzw_info->code_info.bit=8*lzw_info->code_info.count; lzw_info->code_info.eof=MagickFalse; lzw_info->genesis=MagickTrue; lzw_info->stack=(LZWStack *) AcquireMagickMemory(sizeof(*lzw_info->stack)); if (lzw_info->stack == (LZWStack *) NULL) { lzw_info=RelinquishLZWInfo(lzw_info); return((LZWInfo *) NULL); } lzw_info->stack->codes=(size_t *) AcquireQuantumMemory(2UL* MaximumLZWCode,sizeof(*lzw_info->stack->codes)); if (lzw_info->stack->codes == (size_t *) NULL) { lzw_info=RelinquishLZWInfo(lzw_info); return((LZWInfo *) NULL); } lzw_info->stack->index=lzw_info->stack->codes; lzw_info->stack->top=lzw_info->stack->codes+2*MaximumLZWCode; return(lzw_info); } static inline int GetNextLZWCode(LZWInfo *lzw_info,const size_t bits) { int code; register ssize_t i; size_t one; while (((lzw_info->code_info.bit+bits) > (8*lzw_info->code_info.count)) && (lzw_info->code_info.eof == MagickFalse)) { ssize_t count; lzw_info->code_info.buffer[0]=lzw_info->code_info.buffer[ lzw_info->code_info.count-2]; lzw_info->code_info.buffer[1]=lzw_info->code_info.buffer[ lzw_info->code_info.count-1]; lzw_info->code_info.bit-=8*(lzw_info->code_info.count-2); lzw_info->code_info.count=2; count=ReadBlobBlock(lzw_info->image,&lzw_info->code_info.buffer[ lzw_info->code_info.count]); if (count > 0) lzw_info->code_info.count+=count; else lzw_info->code_info.eof=MagickTrue; } if ((lzw_info->code_info.bit+bits) > (8*lzw_info->code_info.count)) return(-1); code=0; one=1; for (i=0; i < (ssize_t) bits; i++) { code|=((lzw_info->code_info.buffer[lzw_info->code_info.bit/8] & (one << (lzw_info->code_info.bit % 
8))) != 0) << i; lzw_info->code_info.bit++; } return(code); } static inline int PopLZWStack(LZWStack *stack_info) { if (stack_info->index <= stack_info->codes) return(-1); stack_info->index--; return((int) *stack_info->index); } static inline void PushLZWStack(LZWStack *stack_info,const size_t value) { if (stack_info->index >= stack_info->top) return; *stack_info->index=value; stack_info->index++; } static int ReadBlobLZWByte(LZWInfo *lzw_info) { int code; size_t one, value; ssize_t count; if (lzw_info->stack->index != lzw_info->stack->codes) return(PopLZWStack(lzw_info->stack)); if (lzw_info->genesis != MagickFalse) { lzw_info->genesis=MagickFalse; do { lzw_info->first_code=(size_t) GetNextLZWCode(lzw_info,lzw_info->bits); lzw_info->last_code=lzw_info->first_code; } while (lzw_info->first_code == lzw_info->clear_code); return((int) lzw_info->first_code); } code=GetNextLZWCode(lzw_info,lzw_info->bits); if (code < 0) return(code); if ((size_t) code == lzw_info->clear_code) { ResetLZWInfo(lzw_info); return(ReadBlobLZWByte(lzw_info)); } if ((size_t) code == lzw_info->end_code) return(-1); if ((size_t) code < lzw_info->slot) value=(size_t) code; else { PushLZWStack(lzw_info->stack,lzw_info->first_code); value=lzw_info->last_code; } count=0; while (value > lzw_info->maximum_data_value) { if ((size_t) count > MaximumLZWCode) return(-1); count++; if ((size_t) value > MaximumLZWCode) return(-1); PushLZWStack(lzw_info->stack,lzw_info->table[1][value]); value=lzw_info->table[0][value]; } lzw_info->first_code=lzw_info->table[1][value]; PushLZWStack(lzw_info->stack,lzw_info->first_code); one=1; if (lzw_info->slot < MaximumLZWCode) { lzw_info->table[0][lzw_info->slot]=lzw_info->last_code; lzw_info->table[1][lzw_info->slot]=lzw_info->first_code; lzw_info->slot++; if ((lzw_info->slot >= lzw_info->maximum_code) && (lzw_info->bits < MaximumLZWBits)) { lzw_info->bits++; lzw_info->maximum_code=one << lzw_info->bits; } } lzw_info->last_code=(size_t) code; return(PopLZWStack(lzw_info->stack)); } static MagickBooleanType DecodeImage(Image *image,const ssize_t opacity) { ExceptionInfo *exception; IndexPacket index; int c; LZWInfo *lzw_info; ssize_t offset, y; unsigned char data_size; size_t pass; /* Allocate decoder tables. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); data_size=(unsigned char) ReadBlobByte(image); if (data_size > MaximumLZWBits) ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename); lzw_info=AcquireLZWInfo(image,data_size); if (lzw_info == (LZWInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); exception=(&image->exception); pass=0; offset=0; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; q=QueueAuthenticPixels(image,0,offset,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; ) { c=ReadBlobLZWByte(lzw_info); if (c < 0) break; index=ConstrainColormapIndex(image,(size_t) c); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); SetPixelOpacity(q,(ssize_t) index == opacity ? 
TransparentOpacity : OpaqueOpacity); x++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (x < (ssize_t) image->columns) break; if (image->interlace == NoInterlace) offset++; else { switch (pass) { case 0: default: { offset+=8; break; } case 1: { offset+=8; break; } case 2: { offset+=4; break; } case 3: { offset+=2; break; } } if ((pass == 0) && (offset >= (ssize_t) image->rows)) { pass++; offset=4; } if ((pass == 1) && (offset >= (ssize_t) image->rows)) { pass++; offset=2; } if ((pass == 2) && (offset >= (ssize_t) image->rows)) { pass++; offset=1; } } } lzw_info=RelinquishLZWInfo(lzw_info); if (y < (ssize_t) image->rows) ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n c o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EncodeImage compresses an image via GIF-coding. % % The format of the EncodeImage method is: % % MagickBooleanType EncodeImage(const ImageInfo *image_info,Image *image, % const size_t data_size) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the address of a structure of type Image. % % o data_size: The number of bits in the compressed packet. % */ static MagickBooleanType EncodeImage(const ImageInfo *image_info,Image *image, const size_t data_size) { #define MaxCode(number_bits) ((one << (number_bits))-1) #define MaxHashTable 5003 #define MaxGIFBits 12UL #define MaxGIFTable (1UL << MaxGIFBits) #define GIFOutputCode(code) \ { \ /* \ Emit a code. \ */ \ if (bits > 0) \ datum|=(code) << bits; \ else \ datum=code; \ bits+=number_bits; \ while (bits >= 8) \ { \ /* \ Add a character to current packet. \ */ \ packet[length++]=(unsigned char) (datum & 0xff); \ if (length >= 254) \ { \ (void) WriteBlobByte(image,(unsigned char) length); \ (void) WriteBlob(image,length,packet); \ length=0; \ } \ datum>>=8; \ bits-=8; \ } \ if (free_code > max_code) \ { \ number_bits++; \ if (number_bits == MaxGIFBits) \ max_code=MaxGIFTable; \ else \ max_code=MaxCode(number_bits); \ } \ } IndexPacket index; register ssize_t i; short *hash_code, *hash_prefix, waiting_code; size_t bits, clear_code, datum, end_of_information_code, free_code, length, max_code, next_pixel, number_bits, one, pass; ssize_t displacement, offset, k, y; unsigned char *packet, *hash_suffix; /* Allocate encoder tables. */ assert(image != (Image *) NULL); one=1; packet=(unsigned char *) AcquireQuantumMemory(256,sizeof(*packet)); hash_code=(short *) AcquireQuantumMemory(MaxHashTable,sizeof(*hash_code)); hash_prefix=(short *) AcquireQuantumMemory(MaxHashTable,sizeof(*hash_prefix)); hash_suffix=(unsigned char *) AcquireQuantumMemory(MaxHashTable, sizeof(*hash_suffix)); if ((packet == (unsigned char *) NULL) || (hash_code == (short *) NULL) || (hash_prefix == (short *) NULL) || (hash_suffix == (unsigned char *) NULL)) { if (packet != (unsigned char *) NULL) packet=(unsigned char *) RelinquishMagickMemory(packet); if (hash_code != (short *) NULL) hash_code=(short *) RelinquishMagickMemory(hash_code); if (hash_prefix != (short *) NULL) hash_prefix=(short *) RelinquishMagickMemory(hash_prefix); if (hash_suffix != (unsigned char *) NULL) hash_suffix=(unsigned char *) RelinquishMagickMemory(hash_suffix); return(MagickFalse); } /* Initialize GIF encoder. 
*/ number_bits=data_size; max_code=MaxCode(number_bits); clear_code=((short) one << (data_size-1)); end_of_information_code=clear_code+1; free_code=clear_code+2; length=0; datum=0; bits=0; for (i=0; i < MaxHashTable; i++) hash_code[i]=0; GIFOutputCode(clear_code); /* Encode pixels. */ offset=0; pass=0; waiting_code=0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,offset,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); if (y == 0) waiting_code=(short) (*indexes); for (x=(ssize_t) (y == 0 ? 1 : 0); x < (ssize_t) image->columns; x++) { /* Probe hash table. */ index=(IndexPacket) ((size_t) GetPixelIndex(indexes+x) & 0xff); p++; k=(ssize_t) (((size_t) index << (MaxGIFBits-8))+waiting_code); if (k >= MaxHashTable) k-=MaxHashTable; next_pixel=MagickFalse; displacement=1; if (hash_code[k] > 0) { if ((hash_prefix[k] == waiting_code) && (hash_suffix[k] == (unsigned char) index)) { waiting_code=hash_code[k]; continue; } if (k != 0) displacement=MaxHashTable-k; for ( ; ; ) { k-=displacement; if (k < 0) k+=MaxHashTable; if (hash_code[k] == 0) break; if ((hash_prefix[k] == waiting_code) && (hash_suffix[k] == (unsigned char) index)) { waiting_code=hash_code[k]; next_pixel=MagickTrue; break; } } if (next_pixel != MagickFalse) continue; } GIFOutputCode((size_t) waiting_code); if (free_code < MaxGIFTable) { hash_code[k]=(short) free_code++; hash_prefix[k]=waiting_code; hash_suffix[k]=(unsigned char) index; } else { /* Fill the hash table with empty entries. */ for (k=0; k < MaxHashTable; k++) hash_code[k]=0; /* Reset compressor and issue a clear code. */ free_code=clear_code+2; GIFOutputCode(clear_code); number_bits=data_size; max_code=MaxCode(number_bits); } waiting_code=(short) index; } if (image_info->interlace == NoInterlace) offset++; else switch (pass) { case 0: default: { offset+=8; if (offset >= (ssize_t) image->rows) { pass++; offset=4; } break; } case 1: { offset+=8; if (offset >= (ssize_t) image->rows) { pass++; offset=2; } break; } case 2: { offset+=4; if (offset >= (ssize_t) image->rows) { pass++; offset=1; } break; } case 3: { offset+=2; break; } } } /* Flush out the buffered code. */ GIFOutputCode((size_t) waiting_code); GIFOutputCode(end_of_information_code); if (bits > 0) { /* Add a character to current packet. */ packet[length++]=(unsigned char) (datum & 0xff); if (length >= 254) { (void) WriteBlobByte(image,(unsigned char) length); (void) WriteBlob(image,length,packet); length=0; } } /* Flush accumulated data. */ if (length > 0) { (void) WriteBlobByte(image,(unsigned char) length); (void) WriteBlob(image,length,packet); } /* Free encoder memory. */ hash_suffix=(unsigned char *) RelinquishMagickMemory(hash_suffix); hash_prefix=(short *) RelinquishMagickMemory(hash_prefix); hash_code=(short *) RelinquishMagickMemory(hash_code); packet=(unsigned char *) RelinquishMagickMemory(packet); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G I F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGIF() returns MagickTrue if the image format type, identified by the % magick string, is GIF. 
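%  Both "GIF87a" and "GIF89a" signatures match, since only the first four
%  bytes ("GIF8") are compared.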
%
%  The format of the IsGIF method is:
%
%      MagickBooleanType IsGIF(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsGIF(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"GIF8",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d B l o b B l o c k                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadBlobBlock() reads data from the image file and returns it.  The
%  amount of data is determined by first reading a count byte.  The number
%  of bytes read is returned.
%
%  The format of the ReadBlobBlock method is:
%
%      ssize_t ReadBlobBlock(Image *image,unsigned char *data)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o data:  Specifies an area to place the information requested from
%      the file.
%
*/
static ssize_t ReadBlobBlock(Image *image,unsigned char *data)
{
  ssize_t
    count;

  unsigned char
    block_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(data != (unsigned char *) NULL);
  count=ReadBlob(image,1,&block_count);
  if (count != 1)
    return(0);
  count=ReadBlob(image,(size_t) block_count,data);
  if (count != (ssize_t) block_count)
    return(0);
  return(count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d G I F I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadGIFImage() reads a Compuserve Graphics image file and returns it.
%  It allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadGIFImage method is:
%
%      Image *ReadGIFImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
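%
%  The reader walks the GIF stream in order: the header and logical screen
%  descriptor first, then a sequence of blocks introduced by '!' (extension),
%  ',' (image descriptor) or ';' (trailer).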
%
*/

static inline size_t MagickMax(const size_t x,const size_t y)
{
  if (x > y)
    return(x);
  return(y);
}

static inline size_t MagickMin(const size_t x,const size_t y)
{
  if (x < y)
    return(x);
  return(y);
}

static MagickBooleanType PingGIFImage(Image *image)
{
  unsigned char
    buffer[256],
    length,
    data_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (ReadBlob(image,1,&data_size) != 1)
    ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename);
  if (data_size > MaximumLZWBits)
    ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename);
  if (ReadBlob(image,1,&length) != 1)
    ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename);
  while (length != 0)
  {
    if (ReadBlob(image,length,buffer) != (ssize_t) length)
      ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename);
    if (ReadBlob(image,1,&length) != 1)
      ThrowBinaryException(CorruptImageError,"CorruptImage",image->filename);
  }
  return(MagickTrue);
}

static Image *ReadGIFImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define BitSet(byte,bit)  (((byte) & (bit)) == (bit))
#define LSBFirstOrder(x,y)  (((y) << 8) | (x))

  Image
    *image,
    *meta_image;

  int
    number_extensions=0;

  MagickBooleanType
    status;

  RectangleInfo
    page;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    delay,
    dispose,
    duration,
    global_colors,
    image_count,
    iterations,
    one;

  ssize_t
    count,
    opacity;

  unsigned char
    background,
    c,
    flag,
    *global_colormap,
    header[MaxTextExtent],
    magick[12];

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Determine if this is a GIF file.
  */
  count=ReadBlob(image,6,magick);
  if ((count != 6) || ((LocaleNCompare((char *) magick,"GIF87",5) != 0) &&
      (LocaleNCompare((char *) magick,"GIF89",5) != 0)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  page.width=ReadBlobLSBShort(image);
  page.height=ReadBlobLSBShort(image);
  flag=(unsigned char) ReadBlobByte(image);
  background=(unsigned char) ReadBlobByte(image);
  c=(unsigned char) ReadBlobByte(image);  /* reserved */
  one=1;
  global_colors=one << (((size_t) flag & 0x07)+1);
  global_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
    MagickMax(global_colors,256),3UL*sizeof(*global_colormap));
  if (global_colormap == (unsigned char *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  if (BitSet((int) flag,0x80) != 0)
    count=ReadBlob(image,(size_t) (3*global_colors),global_colormap);
  delay=0;
  dispose=0;
  duration=0;
  iterations=1;
  opacity=(-1);
  image_count=0;
  meta_image=AcquireImage(image_info);  /* metadata container */
  for ( ; ; )
  {
    count=ReadBlob(image,1,&c);
    if (count != 1)
      break;
    if (c == (unsigned char) ';')
      break;  /* terminator */
    if (c == (unsigned char) '!')
      {
        /*
          GIF Extension block.
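          Labels handled below: 0xf9 graphic control (disposal, delay and
          transparency), 0xfe comment, 0xff application (Netscape loop count
          and embedded profiles); any other label is skipped.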
        */
        count=ReadBlob(image,1,&c);
        if (count != 1)
          {
            global_colormap=(unsigned char *) RelinquishMagickMemory(
              global_colormap);
            ThrowReaderException(CorruptImageError,
              "UnableToReadExtensionBlock");
          }
        switch (c)
        {
          case 0xf9:
          {
            /*
              Read graphics control extension.
            */
            while (ReadBlobBlock(image,header) != 0) ;
            dispose=(size_t) (header[0] >> 2);
            delay=(size_t) ((header[2] << 8) | header[1]);
            if ((ssize_t) (header[0] & 0x01) == 0x01)
              opacity=(ssize_t) header[3];
            break;
          }
          case 0xfe:
          {
            char
              *comments;

            size_t
              length;

            /*
              Read comment extension.
            */
            comments=AcquireString((char *) NULL);
            for (length=0; ; length+=count)
            {
              count=(ssize_t) ReadBlobBlock(image,header);
              if (count == 0)
                break;
              header[count]='\0';
              (void) ConcatenateString(&comments,(const char *) header);
            }
            (void) SetImageProperty(meta_image,"comment",comments);
            comments=DestroyString(comments);
            break;
          }
          case 0xff:
          {
            MagickBooleanType
              loop;

            /*
              Read Netscape Loop extension.
            */
            loop=MagickFalse;
            if (ReadBlobBlock(image,header) != 0)
              loop=LocaleNCompare((char *) header,"NETSCAPE2.0",11) == 0 ?
                MagickTrue : MagickFalse;
            if (loop != MagickFalse)
              {
                while (ReadBlobBlock(image,header) != 0)
                  iterations=(size_t) ((header[2] << 8) | header[1]);
                break;
              }
            else
              {
                char
                  name[MaxTextExtent];

                int
                  block_length,
                  info_length,
                  reserved_length;

                MagickBooleanType
                  i8bim,
                  icc,
                  iptc,
                  magick;

                StringInfo
                  *profile;

                unsigned char
                  *info;

                /*
                  Store GIF application extension as a generic profile.
                */
                icc=LocaleNCompare((char *) header,"ICCRGBG1012",11) == 0 ?
                  MagickTrue : MagickFalse;
                magick=LocaleNCompare((char *) header,"ImageMagick",11) == 0 ?
                  MagickTrue : MagickFalse;
                i8bim=LocaleNCompare((char *) header,"MGK8BIM0000",11) == 0 ?
                  MagickTrue : MagickFalse;
                iptc=LocaleNCompare((char *) header,"MGKIPTC0000",11) == 0 ?
                  MagickTrue : MagickFalse;
                number_extensions++;
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "    Reading GIF application extension");
                info=(unsigned char *) AcquireQuantumMemory(255UL,
                  sizeof(*info));
                if (info == (unsigned char *) NULL)
                  ThrowReaderException(ResourceLimitError,
                    "MemoryAllocationFailed");
                reserved_length=255;
                /*
                  The extension payload arrives as 255-byte sub-blocks;
                  accumulate them, growing the staging buffer as needed.
                */
                for (info_length=0; ; )
                {
                  block_length=(int) ReadBlobBlock(image,&info[info_length]);
                  if (block_length == 0)
                    break;
                  info_length+=block_length;
                  if (info_length > (reserved_length-255))
                    {
                      reserved_length+=4096;
                      info=(unsigned char *) ResizeQuantumMemory(info,(size_t)
                        reserved_length,sizeof(*info));
                      if (info == (unsigned char *) NULL)
                        ThrowReaderException(ResourceLimitError,
                          "MemoryAllocationFailed");
                    }
                }
                profile=BlobToStringInfo(info,(size_t) info_length);
                if (profile == (StringInfo *) NULL)
                  {
                    info=(unsigned char *) RelinquishMagickMemory(info);
                    ThrowReaderException(ResourceLimitError,
                      "MemoryAllocationFailed");
                  }
                if (i8bim != MagickFalse)
                  (void) CopyMagickString(name,"8bim",sizeof(name));
                else if (icc != MagickFalse)
                  (void) CopyMagickString(name,"icc",sizeof(name));
                else if (iptc != MagickFalse)
                  (void) CopyMagickString(name,"iptc",sizeof(name));
                else if (magick != MagickFalse)
                  {
                    (void) CopyMagickString(name,"magick",sizeof(name));
                    image->gamma=StringToDouble((char *) info+6,(char **) NULL);
                  }
                else
                  (void) FormatLocaleString(name,sizeof(name),"gif:%.11s",
                    header);
                info=(unsigned char *) RelinquishMagickMemory(info);
                if (magick == MagickFalse)
                  (void) SetImageProfile(meta_image,name,profile);
                profile=DestroyStringInfo(profile);
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "      profile name=%s",name);
              }
            break;
          }
          default:
          {
            while (ReadBlobBlock(image,header) != 0) ;
            break;
          }
        }
      }
    if (c != (unsigned char) ',')
      continue;
    if (image_count != 0)
      {
        /*
          Allocate next image structure.
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); return((Image *) NULL); } image=SyncNextImageInList(image); } image_count++; /* Read image attributes. */ meta_image->scene=image->scene; CloneImageProperties(image,meta_image); DestroyImageProperties(meta_image); CloneImageProfiles(image,meta_image); DestroyImageProfiles(meta_image); image->storage_class=PseudoClass; image->compression=LZWCompression; page.x=(ssize_t) ReadBlobLSBShort(image); page.y=(ssize_t) ReadBlobLSBShort(image); image->columns=ReadBlobLSBShort(image); image->rows=ReadBlobLSBShort(image); image->depth=8; flag=(unsigned char) ReadBlobByte(image); image->interlace=BitSet((int) flag,0x40) != 0 ? GIFInterlace : NoInterlace; image->colors=BitSet((int) flag,0x80) == 0 ? global_colors : one << ((size_t) (flag & 0x07)+1); if (opacity >= (ssize_t) image->colors) opacity=(-1); image->page.width=page.width; image->page.height=page.height; image->page.y=page.y; image->page.x=page.x; image->delay=delay; image->iterations=iterations; image->ticks_per_second=100; image->dispose=(DisposeType) dispose; image->matte=opacity >= 0 ? MagickTrue : MagickFalse; delay=0; dispose=0; if ((image->columns == 0) || (image->rows == 0)) { global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); } /* Inititialize colormap. */ if (AcquireImageColormap(image,image->colors) == MagickFalse) { global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } if (BitSet((int) flag,0x80) == 0) { /* Use global colormap. */ p=global_colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].blue=ScaleCharToQuantum(*p++); if (i == opacity) { image->colormap[i].opacity=(Quantum) TransparentOpacity; image->transparent_color=image->colormap[opacity]; } } image->background_color=image->colormap[MagickMin(background, image->colors-1)]; } else { unsigned char *colormap; /* Read local colormap. */ colormap=(unsigned char *) AcquireQuantumMemory(image->colors,3* sizeof(*colormap)); if (colormap == (unsigned char *) NULL) { global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } count=ReadBlob(image,(3*image->colors)*sizeof(*colormap),colormap); if (count != (ssize_t) (3*image->colors)) { global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } p=colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].blue=ScaleCharToQuantum(*p++); if (i == opacity) image->colormap[i].opacity=(Quantum) TransparentOpacity; } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } if (image->gamma == 1.0) { for (i=0; i < (ssize_t) image->colors; i++) if (IsGrayPixel(image->colormap+i) == MagickFalse) break; (void) SetImageColorspace(image,i == (ssize_t) image->colors ? 
GRAYColorspace : RGBColorspace); } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Decode image. */ if (image_info->ping != MagickFalse) status=PingGIFImage(image); else status=DecodeImage(image,opacity); if ((image_info->ping == MagickFalse) && (status == MagickFalse)) { global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); ThrowReaderException(CorruptImageError,"CorruptImage"); } duration+=image->delay*image->iterations; if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; opacity=(-1); status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) image->scene-1, image->scene); if (status == MagickFalse) break; } image->duration=duration; meta_image=DestroyImage(meta_image); global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r G I F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterGIFImage() adds properties for the GIF image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterGIFImage method is: % % size_t RegisterGIFImage(void) % */ ModuleExport size_t RegisterGIFImage(void) { MagickInfo *entry; entry=SetMagickInfo("GIF"); entry->decoder=(DecodeImageHandler *) ReadGIFImage; entry->encoder=(EncodeImageHandler *) WriteGIFImage; entry->magick=(IsImageFormatHandler *) IsGIF; entry->description=ConstantString("CompuServe graphics interchange format"); entry->mime_type=ConstantString("image/gif"); entry->module=ConstantString("GIF"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("GIF87"); entry->decoder=(DecodeImageHandler *) ReadGIFImage; entry->encoder=(EncodeImageHandler *) WriteGIFImage; entry->magick=(IsImageFormatHandler *) IsGIF; entry->adjoin=MagickFalse; entry->description=ConstantString("CompuServe graphics interchange format"); entry->version=ConstantString("version 87a"); entry->mime_type=ConstantString("image/gif"); entry->module=ConstantString("GIF"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r G I F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterGIFImage() removes format registrations made by the % GIF module from the list of supported formats. 
% % The format of the UnregisterGIFImage method is: % % UnregisterGIFImage(void) % */ ModuleExport void UnregisterGIFImage(void) { (void) UnregisterMagickInfo("GIF"); (void) UnregisterMagickInfo("GIF87"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e G I F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteGIFImage() writes an image to a file in the Compuserve Graphics % image format. % % The format of the WriteGIFImage method is: % % MagickBooleanType WriteGIFImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteGIFImage(const ImageInfo *image_info,Image *image) { int c; ImageInfo *write_info; InterlaceType interlace; MagickBooleanType status; MagickOffsetType scene; RectangleInfo page; register ssize_t i; register unsigned char *q; size_t bits_per_pixel, delay, length, one; ssize_t j, opacity; unsigned char *colormap, *global_colormap; /* Open output image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); /* Allocate colormap. */ global_colormap=(unsigned char *) AcquireQuantumMemory(768UL, sizeof(*global_colormap)); colormap=(unsigned char *) AcquireQuantumMemory(768UL,sizeof(*colormap)); if ((global_colormap == (unsigned char *) NULL) || (colormap == (unsigned char *) NULL)) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < 768; i++) colormap[i]=(unsigned char) 0; /* Write GIF header. */ write_info=CloneImageInfo(image_info); if (LocaleCompare(write_info->magick,"GIF87") != 0) (void) WriteBlob(image,6,(unsigned char *) "GIF89a"); else { (void) WriteBlob(image,6,(unsigned char *) "GIF87a"); write_info->adjoin=MagickFalse; } /* Determine image bounding box. */ page.width=image->columns; if (image->page.width > page.width) page.width=image->page.width; page.height=image->rows; if (image->page.height > page.height) page.height=image->page.height; page.x=image->page.x; page.y=image->page.y; (void) WriteBlobLSBShort(image,(unsigned short) page.width); (void) WriteBlobLSBShort(image,(unsigned short) page.height); /* Write images to file. */ interlace=write_info->interlace; if ((write_info->adjoin != MagickFalse) && (GetNextImageInList(image) != (Image *) NULL)) interlace=NoInterlace; scene=0; one=1; do { (void) TransformImageColorspace(image,sRGBColorspace); opacity=(-1); if (IsOpaqueImage(image,&image->exception) != MagickFalse) { if ((image->storage_class == DirectClass) || (image->colors > 256)) (void) SetImageType(image,PaletteType); } else { MagickRealType alpha, beta; /* Identify transparent colormap index. 
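            The loop below picks the most transparent colormap entry; its
            index is later written into the graphic control extension.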
*/ if ((image->storage_class == DirectClass) || (image->colors > 256)) (void) SetImageType(image,PaletteBilevelMatteType); for (i=0; i < (ssize_t) image->colors; i++) if (image->colormap[i].opacity != OpaqueOpacity) { if (opacity < 0) { opacity=i; continue; } alpha=(MagickRealType) TransparentOpacity-(MagickRealType) image->colormap[i].opacity; beta=(MagickRealType) TransparentOpacity-(MagickRealType) image->colormap[opacity].opacity; if (alpha < beta) opacity=i; } if (opacity == -1) { (void) SetImageType(image,PaletteBilevelMatteType); for (i=0; i < (ssize_t) image->colors; i++) if (image->colormap[i].opacity != OpaqueOpacity) { if (opacity < 0) { opacity=i; continue; } alpha=(Quantum) TransparentOpacity-(MagickRealType) image->colormap[i].opacity; beta=(Quantum) TransparentOpacity-(MagickRealType) image->colormap[opacity].opacity; if (alpha < beta) opacity=i; } } if (opacity >= 0) { image->colormap[opacity].red=image->transparent_color.red; image->colormap[opacity].green=image->transparent_color.green; image->colormap[opacity].blue=image->transparent_color.blue; } } if ((image->storage_class == DirectClass) || (image->colors > 256)) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); for (bits_per_pixel=1; bits_per_pixel < 8; bits_per_pixel++) if ((one << bits_per_pixel) >= image->colors) break; q=colormap; for (i=0; i < (ssize_t) image->colors; i++) { *q++=ScaleQuantumToChar(image->colormap[i].red); *q++=ScaleQuantumToChar(image->colormap[i].green); *q++=ScaleQuantumToChar(image->colormap[i].blue); } for ( ; i < (ssize_t) (one << bits_per_pixel); i++) { *q++=(unsigned char) 0x0; *q++=(unsigned char) 0x0; *q++=(unsigned char) 0x0; } if ((GetPreviousImageInList(image) == (Image *) NULL) || (write_info->adjoin == MagickFalse)) { /* Write global colormap. */ c=0x80; c|=(8-1) << 4; /* color resolution */ c|=(bits_per_pixel-1); /* size of global colormap */ (void) WriteBlobByte(image,(unsigned char) c); for (j=0; j < (ssize_t) image->colors; j++) if (IsColorEqual(&image->background_color,image->colormap+j)) break; (void) WriteBlobByte(image,(unsigned char) (j == (ssize_t) image->colors ? 0 : j)); /* background color */ (void) WriteBlobByte(image,(unsigned char) 0x00); /* reserved */ length=(size_t) (3*(one << bits_per_pixel)); (void) WriteBlob(image,length,colormap); for (j=0; j < 768; j++) global_colormap[j]=colormap[j]; } if (LocaleCompare(write_info->magick,"GIF87") != 0) { /* Write graphics control extension. */ (void) WriteBlobByte(image,(unsigned char) 0x21); (void) WriteBlobByte(image,(unsigned char) 0xf9); (void) WriteBlobByte(image,(unsigned char) 0x04); c=image->dispose << 2; if (opacity >= 0) c|=0x01; (void) WriteBlobByte(image,(unsigned char) c); delay=(size_t) (100*image->delay/MagickMax((size_t) image->ticks_per_second,1)); (void) WriteBlobLSBShort(image,(unsigned short) delay); (void) WriteBlobByte(image,(unsigned char) (opacity >= 0 ? opacity : 0)); (void) WriteBlobByte(image,(unsigned char) 0x00); if ((LocaleCompare(write_info->magick,"GIF87") != 0) && (GetImageProperty(image,"comment") != (const char *) NULL)) { const char *value; register const char *p; size_t count; /* Write comment extension. 
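            Comment text is emitted as length-prefixed sub-blocks of at most
            255 bytes, terminated by a zero-length block.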
*/ (void) WriteBlobByte(image,(unsigned char) 0x21); (void) WriteBlobByte(image,(unsigned char) 0xfe); value=GetImageProperty(image,"comment"); for (p=value; *p != '\0'; ) { count=MagickMin(strlen(p),255); (void) WriteBlobByte(image,(unsigned char) count); for (i=0; i < (ssize_t) count; i++) (void) WriteBlobByte(image,(unsigned char) *p++); } (void) WriteBlobByte(image,(unsigned char) 0x00); } if ((GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) != (Image *) NULL) && (image->iterations != 1)) { /* Write Netscape Loop extension. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s","NETSCAPE2.0"); (void) WriteBlobByte(image,(unsigned char) 0x21); (void) WriteBlobByte(image,(unsigned char) 0xff); (void) WriteBlobByte(image,(unsigned char) 0x0b); (void) WriteBlob(image,11,(unsigned char *) "NETSCAPE2.0"); (void) WriteBlobByte(image,(unsigned char) 0x03); (void) WriteBlobByte(image,(unsigned char) 0x01); (void) WriteBlobLSBShort(image,(unsigned short) image->iterations); (void) WriteBlobByte(image,(unsigned char) 0x00); } if ((image->gamma != 1.0f/2.2f)) { char attributes[MaxTextExtent]; ssize_t length; /* Write ImageMagick extension. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s","ImageMagick"); (void) WriteBlobByte(image,(unsigned char) 0x21); (void) WriteBlobByte(image,(unsigned char) 0xff); (void) WriteBlobByte(image,(unsigned char) 0x0b); (void) WriteBlob(image,11,(unsigned char *) "ImageMagick"); length=FormatLocaleString(attributes,MaxTextExtent,"gamma=%g", image->gamma); (void) WriteBlobByte(image,(unsigned char) length); (void) WriteBlob(image,length,(unsigned char *) attributes); (void) WriteBlobByte(image,(unsigned char) 0x00); } ResetImageProfileIterator(image); for ( ; ; ) { char *name; const StringInfo *profile; name=GetNextImageProfile(image); if (name == (const char *) NULL) break; profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0) || (LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0) || (LocaleNCompare(name,"gif:",4) == 0)) { size_t length; ssize_t offset; unsigned char *datum; datum=GetStringInfoDatum(profile); length=GetStringInfoLength(profile); (void) WriteBlobByte(image,(unsigned char) 0x21); (void) WriteBlobByte(image,(unsigned char) 0xff); (void) WriteBlobByte(image,(unsigned char) 0x0b); if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { /* Write ICC extension. */ (void) WriteBlob(image,11,(unsigned char *) "ICCRGBG1012"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s","ICCRGBG1012"); } else if ((LocaleCompare(name,"IPTC") == 0)) { /* Write IPTC extension. */ (void) WriteBlob(image,11,(unsigned char *) "MGKIPTC0000"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s","MGKIPTC0000"); } else if ((LocaleCompare(name,"8BIM") == 0)) { /* Write 8BIM extension. */ (void) WriteBlob(image,11,(unsigned char *) "MGK8BIM0000"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s","MGK8BIM0000"); } else { char extension[MaxTextExtent]; /* Write generic extension. 
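                      The 11-byte application identifier is taken from the
                      profile name with its "gif:" prefix stripped.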
*/ (void) CopyMagickString(extension,name+4, sizeof(extension)); (void) WriteBlob(image,11,(unsigned char *) extension); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GIF Extension %s",name); } offset=0; while ((ssize_t) length > offset) { size_t block_length; if ((length-offset) < 255) block_length=length-offset; else block_length=255; (void) WriteBlobByte(image,(unsigned char) block_length); (void) WriteBlob(image,(size_t) block_length,datum+offset); offset+=(ssize_t) block_length; } (void) WriteBlobByte(image,(unsigned char) 0x00); } } } } (void) WriteBlobByte(image,','); /* image separator */ /* Write the image header. */ page.x=image->page.x; page.y=image->page.y; if ((image->page.width != 0) && (image->page.height != 0)) page=image->page; (void) WriteBlobLSBShort(image,(unsigned short) (page.x < 0 ? 0 : page.x)); (void) WriteBlobLSBShort(image,(unsigned short) (page.y < 0 ? 0 : page.y)); (void) WriteBlobLSBShort(image,(unsigned short) image->columns); (void) WriteBlobLSBShort(image,(unsigned short) image->rows); c=0x00; if (interlace != NoInterlace) c|=0x40; /* pixel data is interlaced */ for (j=0; j < (ssize_t) (3*image->colors); j++) if (colormap[j] != global_colormap[j]) break; if (j == (ssize_t) (3*image->colors)) (void) WriteBlobByte(image,(unsigned char) c); else { c|=0x80; c|=(bits_per_pixel-1); /* size of local colormap */ (void) WriteBlobByte(image,(unsigned char) c); length=(size_t) (3*(one << bits_per_pixel)); (void) WriteBlob(image,length,colormap); } /* Write the image data. */ c=(int) MagickMax(bits_per_pixel,2); (void) WriteBlobByte(image,(unsigned char) c); status=EncodeImage(write_info,image,(size_t) MagickMax(bits_per_pixel,2)+1); if (status == MagickFalse) { global_colormap=(unsigned char *) RelinquishMagickMemory( global_colormap); colormap=(unsigned char *) RelinquishMagickMemory(colormap); write_info=DestroyImageInfo(write_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } (void) WriteBlobByte(image,(unsigned char) 0x00); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); scene++; status=SetImageProgress(image,SaveImagesTag,scene, GetImageListLength(image)); if (status == MagickFalse) break; } while (write_info->adjoin != MagickFalse); (void) WriteBlobByte(image,';'); /* terminator */ global_colormap=(unsigned char *) RelinquishMagickMemory(global_colormap); colormap=(unsigned char *) RelinquishMagickMemory(colormap); write_info=DestroyImageInfo(write_info); (void) CloseBlob(image); return(MagickTrue); }
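
/*
  A minimal usage sketch (illustrative only, not part of the coder): decode a
  GIF and re-encode it through this module via the public MagickCore API.  The
  filenames are hypothetical; the block is disabled so the file still builds
  as a single translation unit.
*/
#if 0
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickCoreGenesis("example",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.gif",MaxTextExtent);
  image=ReadImage(image_info,exception);  /* dispatches to ReadGIFImage() */
  if (image != (Image *) NULL)
    {
      (void) CopyMagickString(image->filename,"output.gif",MaxTextExtent);
      (void) WriteImage(image_info,image);  /* dispatches to WriteGIFImage() */
      image=DestroyImageList(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif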
./CrossVul/dataset_final_sorted/CWE-119/c/good_4787_21
crossvul-cpp_data_good_1672_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * Initialization/cleanup for SCTP protocol support. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/seq_file.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/route.h> #include <net/sctp/sctp.h> #include <net/addrconf.h> #include <net/inet_common.h> #include <net/inet_ecn.h> /* Global data structures. */ struct sctp_globals sctp_globals __read_mostly; struct idr sctp_assocs_id; DEFINE_SPINLOCK(sctp_assocs_id_lock); static struct sctp_pf *sctp_pf_inet6_specific; static struct sctp_pf *sctp_pf_inet_specific; static struct sctp_af *sctp_af_v4_specific; static struct sctp_af *sctp_af_v6_specific; struct kmem_cache *sctp_chunk_cachep __read_mostly; struct kmem_cache *sctp_bucket_cachep __read_mostly; long sysctl_sctp_mem[3]; int sysctl_sctp_rmem[3]; int sysctl_sctp_wmem[3]; /* Set up the proc fs entry for the SCTP protocol. */ static int __net_init sctp_proc_init(struct net *net) { #ifdef CONFIG_PROC_FS net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net); if (!net->sctp.proc_net_sctp) goto out_proc_net_sctp; if (sctp_snmp_proc_init(net)) goto out_snmp_proc_init; if (sctp_eps_proc_init(net)) goto out_eps_proc_init; if (sctp_assocs_proc_init(net)) goto out_assocs_proc_init; if (sctp_remaddr_proc_init(net)) goto out_remaddr_proc_init; return 0; out_remaddr_proc_init: sctp_assocs_proc_exit(net); out_assocs_proc_init: sctp_eps_proc_exit(net); out_eps_proc_init: sctp_snmp_proc_exit(net); out_snmp_proc_init: remove_proc_entry("sctp", net->proc_net); net->sctp.proc_net_sctp = NULL; out_proc_net_sctp: return -ENOMEM; #endif /* CONFIG_PROC_FS */ return 0; } /* Clean up the proc fs entry for the SCTP protocol. * Note: Do not make this __exit as it is used in the init error * path. 
*/ static void sctp_proc_exit(struct net *net) { #ifdef CONFIG_PROC_FS sctp_snmp_proc_exit(net); sctp_eps_proc_exit(net); sctp_assocs_proc_exit(net); sctp_remaddr_proc_exit(net); remove_proc_entry("sctp", net->proc_net); net->sctp.proc_net_sctp = NULL; #endif } /* Private helper to extract ipv4 address and stash them in * the protocol structure. */ static void sctp_v4_copy_addrlist(struct list_head *addrlist, struct net_device *dev) { struct in_device *in_dev; struct in_ifaddr *ifa; struct sctp_sockaddr_entry *addr; rcu_read_lock(); if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { rcu_read_unlock(); return; } for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { /* Add the address to the local list. */ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v4.sin_family = AF_INET; addr->a.v4.sin_port = 0; addr->a.v4.sin_addr.s_addr = ifa->ifa_local; addr->valid = 1; INIT_LIST_HEAD(&addr->list); list_add_tail(&addr->list, addrlist); } } rcu_read_unlock(); } /* Extract our IP addresses from the system and stash them in the * protocol structure. */ static void sctp_get_local_addr_list(struct net *net) { struct net_device *dev; struct list_head *pos; struct sctp_af *af; rcu_read_lock(); for_each_netdev_rcu(net, dev) { list_for_each(pos, &sctp_address_families) { af = list_entry(pos, struct sctp_af, list); af->copy_addrlist(&net->sctp.local_addr_list, dev); } } rcu_read_unlock(); } /* Free the existing local addresses. */ static void sctp_free_local_addr_list(struct net *net) { struct sctp_sockaddr_entry *addr; struct list_head *pos, *temp; list_for_each_safe(pos, temp, &net->sctp.local_addr_list) { addr = list_entry(pos, struct sctp_sockaddr_entry, list); list_del(pos); kfree(addr); } } /* Copy the local addresses which are valid for 'scope' into 'bp'. */ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, sctp_scope_t scope, gfp_t gfp, int copy_flags) { struct sctp_sockaddr_entry *addr; int error = 0; rcu_read_lock(); list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { if (!addr->valid) continue; if (sctp_in_scope(net, &addr->a, scope)) { /* Now that the address is in scope, check to see if * the address type is really supported by the local * sock as well as the remote peer. */ if ((((AF_INET == addr->a.sa.sa_family) && (copy_flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->a.sa.sa_family) && (copy_flags & SCTP_ADDR6_ALLOWED) && (copy_flags & SCTP_ADDR6_PEERSUPP)))) { error = sctp_add_bind_addr(bp, &addr->a, SCTP_ADDR_SRC, GFP_ATOMIC); if (error) goto end_copy; } } } end_copy: rcu_read_unlock(); return error; } /* Initialize a sctp_addr from in incoming skb. */ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, int is_saddr) { void *from; __be16 *port; struct sctphdr *sh; port = &addr->v4.sin_port; addr->v4.sin_family = AF_INET; sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; from = &ip_hdr(skb)->saddr; } else { *port = sh->dest; from = &ip_hdr(skb)->daddr; } memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); } /* Initialize an sctp_addr from a socket. */ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) { addr->v4.sin_family = AF_INET; addr->v4.sin_port = 0; addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr; } /* Initialize sk->sk_rcv_saddr from sctp_addr. */ static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) { inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr; } /* Initialize sk->sk_daddr from sctp_addr. 
*/ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) { inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr; } /* Initialize a sctp_addr from an address parameter. */ static void sctp_v4_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { addr->v4.sin_family = AF_INET; addr->v4.sin_port = port; addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; } /* Initialize an address parameter from a sctp_addr and return the length * of the address parameter. */ static int sctp_v4_to_addr_param(const union sctp_addr *addr, union sctp_addr_param *param) { int length = sizeof(sctp_ipv4addr_param_t); param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; param->v4.param_hdr.length = htons(length); param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; return length; } /* Initialize a sctp_addr from a dst_entry. */ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4, __be16 port) { saddr->v4.sin_family = AF_INET; saddr->v4.sin_port = port; saddr->v4.sin_addr.s_addr = fl4->saddr; } /* Compare two addresses exactly. */ static int sctp_v4_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2) { if (addr1->sa.sa_family != addr2->sa.sa_family) return 0; if (addr1->v4.sin_port != addr2->v4.sin_port) return 0; if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr) return 0; return 1; } /* Initialize addr struct to INADDR_ANY. */ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) { addr->v4.sin_family = AF_INET; addr->v4.sin_addr.s_addr = htonl(INADDR_ANY); addr->v4.sin_port = port; } /* Is this a wildcard address? */ static int sctp_v4_is_any(const union sctp_addr *addr) { return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr; } /* This function checks if the address is a valid address to be used for * SCTP binding. * * Output: * Return 0 - If the address is a non-unicast or an illegal address. * Return 1 - If the address is a unicast. */ static int sctp_v4_addr_valid(union sctp_addr *addr, struct sctp_sock *sp, const struct sk_buff *skb) { /* IPv4 addresses not allowed */ if (sp && ipv6_only_sock(sctp_opt2sk(sp))) return 0; /* Is this a non-unicast address or a unusable SCTP address? */ if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) return 0; /* Is this a broadcast address? */ if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST) return 0; return 1; } /* Should this be available for binding? */ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) { struct net *net = sock_net(&sp->inet.sk); int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr); if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && ret != RTN_LOCAL && !sp->inet.freebind && !net->ipv4.sysctl_ip_nonlocal_bind) return 0; if (ipv6_only_sock(sctp_opt2sk(sp))) return 0; return 1; } /* Checking the loopback, private and other address scopes as defined in * RFC 1918. The IPv4 scoping is based on the draft for SCTP IPv4 * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>. * * Level 0 - unusable SCTP addresses * Level 1 - loopback address * Level 2 - link-local addresses * Level 3 - private addresses. * Level 4 - global addresses * For INIT and INIT-ACK address list, let L be the level of * of requested destination address, sender and receiver * SHOULD include all of its addresses with level greater * than or equal to L. * * IPv4 scoping can be controlled through sysctl option * net.sctp.addr_scope_policy */ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) { sctp_scope_t retval; /* Check for unusable SCTP addresses. 
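	 * Broadcast, multicast, zero-network and similar reserved ranges
	 * fall in this category and map to SCTP_SCOPE_UNUSABLE.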
*/ if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_UNUSABLE; } else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_LOOPBACK; } else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_LINK; } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) || ipv4_is_private_172(addr->v4.sin_addr.s_addr) || ipv4_is_private_192(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_PRIVATE; } else { retval = SCTP_SCOPE_GLOBAL; } return retval; } /* Returns a valid dst cache entry for the given source and destination ip * addresses. If an association is passed, trys to get a dst entry with a * source address that matches an address in the bind address list. */ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct flowi *fl, struct sock *sk) { struct sctp_association *asoc = t->asoc; struct rtable *rt; struct flowi4 *fl4 = &fl->u.ip4; struct sctp_bind_addr *bp; struct sctp_sockaddr_entry *laddr; struct dst_entry *dst = NULL; union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; memset(fl4, 0x0, sizeof(struct flowi4)); fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; if (asoc) { fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if; fl4->fl4_sport = htons(asoc->base.bind_addr.port); } if (saddr) { fl4->saddr = saddr->v4.sin_addr.s_addr; fl4->fl4_sport = saddr->v4.sin_port; } pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, &fl4->saddr); rt = ip_route_output_key(sock_net(sk), fl4); if (!IS_ERR(rt)) dst = &rt->dst; /* If there is no association or if a source address is passed, no * more validation is required. */ if (!asoc || saddr) goto out; bp = &asoc->base.bind_addr; if (dst) { /* Walk through the bind address list and look for a bind * address that matches the source address of the returned dst. */ sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port)); rcu_read_lock(); list_for_each_entry_rcu(laddr, &bp->address_list, list) { if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) || (laddr->state != SCTP_ADDR_SRC && !asoc->src_out_of_asoc_ok)) continue; if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) goto out_unlock; } rcu_read_unlock(); /* None of the bound addresses match the source address of the * dst. So release it. */ dst_release(dst); dst = NULL; } /* Walk through the bind address list and try to get a dst that * matches a bind address as the source address. */ rcu_read_lock(); list_for_each_entry_rcu(laddr, &bp->address_list, list) { struct net_device *odev; if (!laddr->valid) continue; if (laddr->state != SCTP_ADDR_SRC || AF_INET != laddr->a.sa.sa_family) continue; fl4->fl4_sport = laddr->a.v4.sin_port; flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if, RT_CONN_FLAGS(asoc->base.sk), daddr->v4.sin_addr.s_addr, laddr->a.v4.sin_addr.s_addr); rt = ip_route_output_key(sock_net(sk), fl4); if (IS_ERR(rt)) continue; if (!dst) dst = &rt->dst; /* Ensure the src address belongs to the output * interface. */ odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, false); if (!odev || odev->ifindex != fl4->flowi4_oif) { if (&rt->dst != dst) dst_release(&rt->dst); continue; } if (dst != &rt->dst) dst_release(dst); dst = &rt->dst; break; } out_unlock: rcu_read_unlock(); out: t->dst = dst; if (dst) pr_debug("rt_dst:%pI4, rt_src:%pI4\n", &fl4->daddr, &fl4->saddr); else pr_debug("no route\n"); } /* For v4, the source address is cached in the route entry(dst). 
So no need * to cache it separately and hence this is an empty routine. */ static void sctp_v4_get_saddr(struct sctp_sock *sk, struct sctp_transport *t, struct flowi *fl) { union sctp_addr *saddr = &t->saddr; struct rtable *rt = (struct rtable *)t->dst; if (rt) { saddr->v4.sin_family = AF_INET; saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr; } } /* What interface did this skb arrive on? */ static int sctp_v4_skb_iif(const struct sk_buff *skb) { return inet_iif(skb); } /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v4_is_ce(const struct sk_buff *skb) { return INET_ECN_is_ce(ip_hdr(skb)->tos); } /* Create and initialize a new sk for the socket returned by accept(). */ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, struct sctp_association *asoc) { struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, sk->sk_prot, 0); struct inet_sock *newinet; if (!newsk) goto out; sock_init_data(NULL, newsk); sctp_copy_sock(newsk, sk, asoc); sock_reset_flag(newsk, SOCK_ZAPPED); newinet = inet_sk(newsk); newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; sk_refcnt_debug_inc(newsk); if (newsk->sk_prot->init(newsk)) { sk_common_release(newsk); newsk = NULL; } out: return newsk; } static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) { /* No address mapping for V4 sockets */ return sizeof(struct sockaddr_in); } /* Dump the v4 addr to the seq file. */ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) { seq_printf(seq, "%pI4 ", &addr->v4.sin_addr); } static void sctp_v4_ecn_capable(struct sock *sk) { INET_ECN_xmit(sk); } static void sctp_addr_wq_timeout_handler(unsigned long arg) { struct net *net = (struct net *)arg; struct sctp_sockaddr_entry *addrw, *temp; struct sctp_sock *sp; spin_lock_bh(&net->sctp.addr_wq_lock); list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) { pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at " "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa, addrw->state, addrw); #if IS_ENABLED(CONFIG_IPV6) /* Now we send an ASCONF for each association */ /* Note. 
we currently don't handle link local IPv6 addressees */ if (addrw->a.sa.sa_family == AF_INET6) { struct in6_addr *in6; if (ipv6_addr_type(&addrw->a.v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) goto free_next; in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr; if (ipv6_chk_addr(net, in6, NULL, 0) == 0 && addrw->state == SCTP_ADDR_NEW) { unsigned long timeo_val; pr_debug("%s: this is on DAD, trying %d sec " "later\n", __func__, SCTP_ADDRESS_TICK_DELAY); timeo_val = jiffies; timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY); mod_timer(&net->sctp.addr_wq_timer, timeo_val); break; } } #endif list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) { struct sock *sk; sk = sctp_opt2sk(sp); /* ignore bound-specific endpoints */ if (!sctp_is_ep_boundall(sk)) continue; bh_lock_sock(sk); if (sctp_asconf_mgmt(sp, addrw) < 0) pr_debug("%s: sctp_asconf_mgmt failed\n", __func__); bh_unlock_sock(sk); } #if IS_ENABLED(CONFIG_IPV6) free_next: #endif list_del(&addrw->list); kfree(addrw); } spin_unlock_bh(&net->sctp.addr_wq_lock); } static void sctp_free_addr_wq(struct net *net) { struct sctp_sockaddr_entry *addrw; struct sctp_sockaddr_entry *temp; spin_lock_bh(&net->sctp.addr_wq_lock); del_timer(&net->sctp.addr_wq_timer); list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) { list_del(&addrw->list); kfree(addrw); } spin_unlock_bh(&net->sctp.addr_wq_lock); } /* lookup the entry for the same address in the addr_waitq * sctp_addr_wq MUST be locked */ static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net, struct sctp_sockaddr_entry *addr) { struct sctp_sockaddr_entry *addrw; list_for_each_entry(addrw, &net->sctp.addr_waitq, list) { if (addrw->a.sa.sa_family != addr->a.sa.sa_family) continue; if (addrw->a.sa.sa_family == AF_INET) { if (addrw->a.v4.sin_addr.s_addr == addr->a.v4.sin_addr.s_addr) return addrw; } else if (addrw->a.sa.sa_family == AF_INET6) { if (ipv6_addr_equal(&addrw->a.v6.sin6_addr, &addr->a.v6.sin6_addr)) return addrw; } } return NULL; } void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd) { struct sctp_sockaddr_entry *addrw; unsigned long timeo_val; /* first, we check if an opposite message already exist in the queue. * If we found such message, it is removed. * This operation is a bit stupid, but the DHCP client attaches the * new address after a couple of addition and deletion of that address */ spin_lock_bh(&net->sctp.addr_wq_lock); /* Offsets existing events in addr_wq */ addrw = sctp_addr_wq_lookup(net, addr); if (addrw) { if (addrw->state != cmd) { pr_debug("%s: offsets existing entry for %d, addr:%pISc " "in wq:%p\n", __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq); list_del(&addrw->list); kfree(addrw); } spin_unlock_bh(&net->sctp.addr_wq_lock); return; } /* OK, we have to add the new address to the wait queue */ addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); if (addrw == NULL) { spin_unlock_bh(&net->sctp.addr_wq_lock); return; } addrw->state = cmd; list_add_tail(&addrw->list, &net->sctp.addr_waitq); pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n", __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq); if (!timer_pending(&net->sctp.addr_wq_timer)) { timeo_val = jiffies; timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY); mod_timer(&net->sctp.addr_wq_timer, timeo_val); } spin_unlock_bh(&net->sctp.addr_wq_lock); } /* Event handler for inet address addition/deletion events. 
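 * NETDEV_UP queues the new address for addition; NETDEV_DOWN marks it
 * invalid and unlinks it from the list.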
* The sctp_local_addr_list needs to be protocted by a spin lock since * multiple notifiers (say IPv4 and IPv6) may be running at the same * time and thus corrupt the list. * The reader side is protected with RCU. */ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct sctp_sockaddr_entry *addr = NULL; struct sctp_sockaddr_entry *temp; struct net *net = dev_net(ifa->ifa_dev->dev); int found = 0; switch (ev) { case NETDEV_UP: addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); if (addr) { addr->a.v4.sin_family = AF_INET; addr->a.v4.sin_port = 0; addr->a.v4.sin_addr.s_addr = ifa->ifa_local; addr->valid = 1; spin_lock_bh(&net->sctp.local_addr_lock); list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list); sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW); spin_unlock_bh(&net->sctp.local_addr_lock); } break; case NETDEV_DOWN: spin_lock_bh(&net->sctp.local_addr_lock); list_for_each_entry_safe(addr, temp, &net->sctp.local_addr_list, list) { if (addr->a.sa.sa_family == AF_INET && addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL); found = 1; addr->valid = 0; list_del_rcu(&addr->list); break; } } spin_unlock_bh(&net->sctp.local_addr_lock); if (found) kfree_rcu(addr, rcu); break; } return NOTIFY_DONE; } /* * Initialize the control inode/socket with a control endpoint data * structure. This endpoint is reserved exclusively for the OOTB processing. */ static int sctp_ctl_sock_init(struct net *net) { int err; sa_family_t family = PF_INET; if (sctp_get_pf_specific(PF_INET6)) family = PF_INET6; err = inet_ctl_sock_create(&net->sctp.ctl_sock, family, SOCK_SEQPACKET, IPPROTO_SCTP, net); /* If IPv6 socket could not be created, try the IPv4 socket */ if (err < 0 && family == PF_INET6) err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP, net); if (err < 0) { pr_err("Failed to create the SCTP control socket\n"); return err; } return 0; } /* Register address family specific functions. */ int sctp_register_af(struct sctp_af *af) { switch (af->sa_family) { case AF_INET: if (sctp_af_v4_specific) return 0; sctp_af_v4_specific = af; break; case AF_INET6: if (sctp_af_v6_specific) return 0; sctp_af_v6_specific = af; break; default: return 0; } INIT_LIST_HEAD(&af->list); list_add_tail(&af->list, &sctp_address_families); return 1; } /* Get the table of functions for manipulating a particular address * family. */ struct sctp_af *sctp_get_af_specific(sa_family_t family) { switch (family) { case AF_INET: return sctp_af_v4_specific; case AF_INET6: return sctp_af_v6_specific; default: return NULL; } } /* Common code to initialize a AF_INET msg_name. */ static void sctp_inet_msgname(char *msgname, int *addr_len) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)msgname; *addr_len = sizeof(struct sockaddr_in); sin->sin_family = AF_INET; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } /* Copy the primary address of the peer primary address as the msg_name. */ static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname, int *addr_len) { struct sockaddr_in *sin, *sinfrom; if (msgname) { struct sctp_association *asoc; asoc = event->asoc; sctp_inet_msgname(msgname, addr_len); sin = (struct sockaddr_in *)msgname; sinfrom = &asoc->peer.primary_addr.v4; sin->sin_port = htons(asoc->peer.port); sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr; } } /* Initialize and copy out a msgname from an inbound skb. 
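 * The port is taken from the SCTP common header's source port and the
 * address from the IP header's source address.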
*/ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) { if (msgname) { struct sctphdr *sh = sctp_hdr(skb); struct sockaddr_in *sin = (struct sockaddr_in *)msgname; sctp_inet_msgname(msgname, len); sin->sin_port = sh->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; } } /* Do we support this AF? */ static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp) { /* PF_INET only supports AF_INET addresses. */ return AF_INET == family; } /* Address matching with wildcards allowed. */ static int sctp_inet_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2, struct sctp_sock *opt) { /* PF_INET only supports AF_INET addresses. */ if (addr1->sa.sa_family != addr2->sa.sa_family) return 0; if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr || htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr) return 1; if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr) return 1; return 0; } /* Verify that provided sockaddr looks bindable. Common verification has * already been taken care of. */ static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) { return sctp_v4_available(addr, opt); } /* Verify that sockaddr looks sendable. Common verification has already * been taken care of. */ static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr) { return 1; } /* Fill in Supported Address Type information for INIT and INIT-ACK * chunks. Returns number of addresses supported. */ static int sctp_inet_supported_addrs(const struct sctp_sock *opt, __be16 *types) { types[0] = SCTP_PARAM_IPV4_ADDRESS; return 1; } /* Wrapper routine that calls the ip transmit routine. */ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *transport) { struct inet_sock *inet = inet_sk(skb->sk); pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb, skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT; SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); return ip_queue_xmit(&inet->sk, skb, &transport->fl); } static struct sctp_af sctp_af_inet; static struct sctp_pf sctp_pf_inet = { .event_msgname = sctp_inet_event_msgname, .skb_msgname = sctp_inet_skb_msgname, .af_supported = sctp_inet_af_supported, .cmp_addr = sctp_inet_cmp_addr, .bind_verify = sctp_inet_bind_verify, .send_verify = sctp_inet_send_verify, .supported_addrs = sctp_inet_supported_addrs, .create_accept_sk = sctp_v4_create_accept_sk, .addr_to_user = sctp_v4_addr_to_user, .to_sk_saddr = sctp_v4_to_sk_saddr, .to_sk_daddr = sctp_v4_to_sk_daddr, .af = &sctp_af_inet }; /* Notifier for inetaddr addition/deletion events. */ static struct notifier_block sctp_inetaddr_notifier = { .notifier_call = sctp_inetaddr_event, }; /* Socket operations. */ static const struct proto_ops inet_seqpacket_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, /* Needs to be wrapped... */ .bind = inet_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* Semantics are different. */ .poll = sctp_poll, .ioctl = inet_ioctl, .listen = sctp_inet_listen, .shutdown = inet_shutdown, /* Looks harmless. 
*/ .setsockopt = sock_common_setsockopt, /* IP_SOL IP_OPTION is a problem */ .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; /* Registration with AF_INET family. */ static struct inet_protosw sctp_seqpacket_protosw = { .type = SOCK_SEQPACKET, .protocol = IPPROTO_SCTP, .prot = &sctp_prot, .ops = &inet_seqpacket_ops, .flags = SCTP_PROTOSW_FLAG }; static struct inet_protosw sctp_stream_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_SCTP, .prot = &sctp_prot, .ops = &inet_seqpacket_ops, .flags = SCTP_PROTOSW_FLAG }; /* Register with IP layer. */ static const struct net_protocol sctp_protocol = { .handler = sctp_rcv, .err_handler = sctp_v4_err, .no_policy = 1, .netns_ok = 1, .icmp_strict_tag_validation = 1, }; /* IPv4 address related functions. */ static struct sctp_af sctp_af_inet = { .sa_family = AF_INET, .sctp_xmit = sctp_v4_xmit, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .get_dst = sctp_v4_get_dst, .get_saddr = sctp_v4_get_saddr, .copy_addrlist = sctp_v4_copy_addrlist, .from_skb = sctp_v4_from_skb, .from_sk = sctp_v4_from_sk, .from_addr_param = sctp_v4_from_addr_param, .to_addr_param = sctp_v4_to_addr_param, .cmp_addr = sctp_v4_cmp_addr, .addr_valid = sctp_v4_addr_valid, .inaddr_any = sctp_v4_inaddr_any, .is_any = sctp_v4_is_any, .available = sctp_v4_available, .scope = sctp_v4_scope, .skb_iif = sctp_v4_skb_iif, .is_ce = sctp_v4_is_ce, .seq_dump_addr = sctp_v4_seq_dump_addr, .ecn_capable = sctp_v4_ecn_capable, .net_header_len = sizeof(struct iphdr), .sockaddr_len = sizeof(struct sockaddr_in), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; struct sctp_pf *sctp_get_pf_specific(sa_family_t family) { switch (family) { case PF_INET: return sctp_pf_inet_specific; case PF_INET6: return sctp_pf_inet6_specific; default: return NULL; } } /* Register the PF specific function table. */ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) { switch (family) { case PF_INET: if (sctp_pf_inet_specific) return 0; sctp_pf_inet_specific = pf; break; case PF_INET6: if (sctp_pf_inet6_specific) return 0; sctp_pf_inet6_specific = pf; break; default: return 0; } return 1; } static inline int init_sctp_mibs(struct net *net) { net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib); if (!net->sctp.sctp_statistics) return -ENOMEM; return 0; } static inline void cleanup_sctp_mibs(struct net *net) { free_percpu(net->sctp.sctp_statistics); } static void sctp_v4_pf_init(void) { /* Initialize the SCTP specific PF functions. */ sctp_register_pf(&sctp_pf_inet, PF_INET); sctp_register_af(&sctp_af_inet); } static void sctp_v4_pf_exit(void) { list_del(&sctp_af_inet.list); } static int sctp_v4_protosw_init(void) { int rc; rc = proto_register(&sctp_prot, 1); if (rc) return rc; /* Register SCTP(UDP and TCP style) with socket layer. */ inet_register_protosw(&sctp_seqpacket_protosw); inet_register_protosw(&sctp_stream_protosw); return 0; } static void sctp_v4_protosw_exit(void) { inet_unregister_protosw(&sctp_stream_protosw); inet_unregister_protosw(&sctp_seqpacket_protosw); proto_unregister(&sctp_prot); } static int sctp_v4_add_protocol(void) { /* Register notifier for inet address additions/deletions. */ register_inetaddr_notifier(&sctp_inetaddr_notifier); /* Register SCTP with inet layer. 
*/ if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) return -EAGAIN; return 0; } static void sctp_v4_del_protocol(void) { inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); unregister_inetaddr_notifier(&sctp_inetaddr_notifier); } static int __net_init sctp_defaults_init(struct net *net) { int status; /* * 14. Suggested SCTP Protocol Parameter Values */ /* The following protocol parameters are RECOMMENDED: */ /* RTO.Initial - 3 seconds */ net->sctp.rto_initial = SCTP_RTO_INITIAL; /* RTO.Min - 1 second */ net->sctp.rto_min = SCTP_RTO_MIN; /* RTO.Max - 60 seconds */ net->sctp.rto_max = SCTP_RTO_MAX; /* RTO.Alpha - 1/8 */ net->sctp.rto_alpha = SCTP_RTO_ALPHA; /* RTO.Beta - 1/4 */ net->sctp.rto_beta = SCTP_RTO_BETA; /* Valid.Cookie.Life - 60 seconds */ net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE; /* Whether Cookie Preservative is enabled(1) or not(0) */ net->sctp.cookie_preserve_enable = 1; /* Default sctp sockets to use md5 as their hmac alg */ #if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5) net->sctp.sctp_hmac_alg = "md5"; #elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1) net->sctp.sctp_hmac_alg = "sha1"; #else net->sctp.sctp_hmac_alg = NULL; #endif /* Max.Burst - 4 */ net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST; /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) * Max.Init.Retransmits - 8 attempts */ net->sctp.max_retrans_association = 10; net->sctp.max_retrans_path = 5; net->sctp.max_retrans_init = 8; /* Sendbuffer growth - do per-socket accounting */ net->sctp.sndbuf_policy = 0; /* Rcvbuffer growth - do per-socket accounting */ net->sctp.rcvbuf_policy = 0; /* HB.interval - 30 seconds */ net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; /* delayed SACK timeout */ net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK; /* Disable ADDIP by default. */ net->sctp.addip_enable = 0; net->sctp.addip_noauth = 0; net->sctp.default_auto_asconf = 0; /* Enable PR-SCTP by default. */ net->sctp.prsctp_enable = 1; /* Disable AUTH by default. */ net->sctp.auth_enable = 0; /* Set SCOPE policy to enabled */ net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE; /* Set the default rwnd update threshold */ net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT; /* Initialize maximum autoclose timeout. */ net->sctp.max_autoclose = INT_MAX / HZ; status = sctp_sysctl_net_register(net); if (status) goto err_sysctl_register; /* Allocate and initialise sctp mibs. */ status = init_sctp_mibs(net); if (status) goto err_init_mibs; /* Initialize proc fs directory. */ status = sctp_proc_init(net); if (status) goto err_init_proc; sctp_dbg_objcnt_init(net); /* Initialize the local address list. 
*/ INIT_LIST_HEAD(&net->sctp.local_addr_list); spin_lock_init(&net->sctp.local_addr_lock); sctp_get_local_addr_list(net); /* Initialize the address event list */ INIT_LIST_HEAD(&net->sctp.addr_waitq); INIT_LIST_HEAD(&net->sctp.auto_asconf_splist); spin_lock_init(&net->sctp.addr_wq_lock); net->sctp.addr_wq_timer.expires = 0; setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, (unsigned long)net); return 0; err_init_proc: cleanup_sctp_mibs(net); err_init_mibs: sctp_sysctl_net_unregister(net); err_sysctl_register: return status; } static void __net_exit sctp_defaults_exit(struct net *net) { /* Free the local address list */ sctp_free_addr_wq(net); sctp_free_local_addr_list(net); sctp_dbg_objcnt_exit(net); sctp_proc_exit(net); cleanup_sctp_mibs(net); sctp_sysctl_net_unregister(net); } static struct pernet_operations sctp_defaults_ops = { .init = sctp_defaults_init, .exit = sctp_defaults_exit, }; static int __net_init sctp_ctrlsock_init(struct net *net) { int status; /* Initialize the control inode/socket for handling OOTB packets. */ status = sctp_ctl_sock_init(net); if (status) pr_err("Failed to initialize the SCTP control sock\n"); return status; } static void __net_init sctp_ctrlsock_exit(struct net *net) { /* Free the control endpoint. */ inet_ctl_sock_destroy(net->sctp.ctl_sock); } static struct pernet_operations sctp_ctrlsock_ops = { .init = sctp_ctrlsock_init, .exit = sctp_ctrlsock_exit, }; /* Initialize the universe into something sensible. */ static __init int sctp_init(void) { int i; int status = -EINVAL; unsigned long goal; unsigned long limit; int max_share; int order; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", sizeof(struct sctp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_bucket_cachep) goto out; sctp_chunk_cachep = kmem_cache_create("sctp_chunk", sizeof(struct sctp_chunk), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_chunk_cachep) goto err_chunk_cachep; status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); if (status) goto err_percpu_counter_init; /* Implementation specific variables. */ /* Initialize default stream count setup information. */ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold*/ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; sysctl_sctp_wmem[1] = 16*1024; sysctl_sctp_wmem[2] = max(64*1024, max_share); /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. 
*/ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (22 - PAGE_SHIFT); else goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; do { sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_hashbucket); if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); status = -ENOMEM; goto err_ahash_alloc; } for (i = 0; i < sctp_assoc_hashsize; i++) { rwlock_init(&sctp_assoc_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); } /* Allocate and initialize the endpoint hash table. */ sctp_ep_hashsize = 64; sctp_ep_hashtable = kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); if (!sctp_ep_hashtable) { pr_err("Failed endpoint_hash alloc\n"); status = -ENOMEM; goto err_ehash_alloc; } for (i = 0; i < sctp_ep_hashsize; i++) { rwlock_init(&sctp_ep_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); } /* Allocate and initialize the SCTP port hash table. */ do { sctp_port_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_bind_hashbucket); if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); status = -ENOMEM; goto err_bhash_alloc; } for (i = 0; i < sctp_port_hashsize; i++) { spin_lock_init(&sctp_port_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); } pr_info("Hash tables configured (established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); sctp_v4_pf_init(); sctp_v6_pf_init(); status = register_pernet_subsys(&sctp_defaults_ops); if (status) goto err_register_defaults; status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_ctrlsock_ops); if (status) goto err_register_ctrlsock; status = sctp_v4_add_protocol(); if (status) goto err_add_protocol; /* Register SCTP with inet6 layer. */ status = sctp_v6_add_protocol(); if (status) goto err_v6_add_protocol; out: return status; err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: unregister_pernet_subsys(&sctp_ctrlsock_ops); err_register_ctrlsock: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: unregister_pernet_subsys(&sctp_defaults_ops); err_register_defaults: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); err_bhash_alloc: kfree(sctp_ep_hashtable); err_ehash_alloc: free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); err_ahash_alloc: percpu_counter_destroy(&sctp_sockets_allocated); err_percpu_counter_init: kmem_cache_destroy(sctp_chunk_cachep); err_chunk_cachep: kmem_cache_destroy(sctp_bucket_cachep); goto out; } /* Exit handler for the SCTP protocol. */ static __exit void sctp_exit(void) { /* BUG. This should probably do something useful like clean * up all the remaining associations and all that memory. */ /* Unregister with inet6/inet layers. 
*/ sctp_v6_del_protocol(); sctp_v4_del_protocol(); unregister_pernet_subsys(&sctp_ctrlsock_ops); /* Free protosw registrations */ sctp_v6_protosw_exit(); sctp_v4_protosw_exit(); unregister_pernet_subsys(&sctp_defaults_ops); /* Unregister with socket layer. */ sctp_v6_pf_exit(); sctp_v4_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); kfree(sctp_ep_hashtable); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); percpu_counter_destroy(&sctp_sockets_allocated); rcu_barrier(); /* Wait for completion of call_rcu()'s */ kmem_cache_destroy(sctp_chunk_cachep); kmem_cache_destroy(sctp_bucket_cachep); } module_init(sctp_init); module_exit(sctp_exit); /* * __stringify doesn't like enums, so use IPPROTO_SCTP value (132) directly. */ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132"); MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>"); MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); module_param_named(no_checksums, sctp_checksum_disable, bool, 0644); MODULE_PARM_DESC(no_checksums, "Disable checksum computing and verification"); MODULE_LICENSE("GPL");
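/*
 * A minimal illustrative sketch, not part of the original source: the
 * reader side of the locking scheme described at the top of this file.
 * Writers serialize on net->sctp.local_addr_lock, so a reader walking
 * net->sctp.local_addr_list only needs rcu_read_lock().  The helper
 * name is hypothetical, and the block is kept under #if 0 so it cannot
 * affect a build.
 */
#if 0
static int sctp_example_count_local_addrs(struct net *net)
{
	struct sctp_sockaddr_entry *addr;
	int count = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		/* Entries with valid == 0 have been list_del_rcu()'d by
		 * the NETDEV_DOWN path but may remain visible until the
		 * RCU grace period ends, so skip them here. */
		if (addr->valid)
			count++;
	}
	rcu_read_unlock();
	return count;
}
#endif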
./CrossVul/dataset_final_sorted/CWE-119/c/good_1672_0
crossvul-cpp_data_good_2593_1
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Quake III Arena source code; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ #include "q_shared.h" #include "qcommon.h" static huffman_t msgHuff; static qboolean msgInit = qfalse; int pcount[256]; /* ============================================================================== MESSAGE IO FUNCTIONS Handles byte ordering and avoids alignment errors ============================================================================== */ int oldsize = 0; void MSG_initHuffman( void ); void MSG_Init( msg_t *buf, byte *data, int length ) { if (!msgInit) { MSG_initHuffman(); } Com_Memset (buf, 0, sizeof(*buf)); buf->data = data; buf->maxsize = length; } void MSG_InitOOB( msg_t *buf, byte *data, int length ) { if (!msgInit) { MSG_initHuffman(); } Com_Memset (buf, 0, sizeof(*buf)); buf->data = data; buf->maxsize = length; buf->oob = qtrue; } void MSG_Clear( msg_t *buf ) { buf->cursize = 0; buf->overflowed = qfalse; buf->bit = 0; //<- in bits } void MSG_Bitstream( msg_t *buf ) { buf->oob = qfalse; } void MSG_BeginReading( msg_t *msg ) { msg->readcount = 0; msg->bit = 0; msg->oob = qfalse; } void MSG_BeginReadingOOB( msg_t *msg ) { msg->readcount = 0; msg->bit = 0; msg->oob = qtrue; } void MSG_Copy(msg_t *buf, byte *data, int length, msg_t *src) { if (length<src->cursize) { Com_Error( ERR_DROP, "MSG_Copy: can't copy into a smaller msg_t buffer"); } Com_Memcpy(buf, src, sizeof(msg_t)); buf->data = data; Com_Memcpy(buf->data, src->data, src->cursize); } /* ============================================================================= bit functions ============================================================================= */ // negative bit values include signs void MSG_WriteBits( msg_t *msg, int value, int bits ) { int i; oldsize += bits; if ( msg->overflowed ) { return; } if ( bits == 0 || bits < -31 || bits > 32 ) { Com_Error( ERR_DROP, "MSG_WriteBits: bad bits %i", bits ); } if ( bits < 0 ) { bits = -bits; } if ( msg->oob ) { if ( msg->cursize + ( bits >> 3 ) > msg->maxsize ) { msg->overflowed = qtrue; return; } if ( bits == 8 ) { msg->data[msg->cursize] = value; msg->cursize += 1; msg->bit += 8; } else if ( bits == 16 ) { short temp = value; CopyLittleShort( &msg->data[msg->cursize], &temp ); msg->cursize += 2; msg->bit += 16; } else if ( bits==32 ) { CopyLittleLong( &msg->data[msg->cursize], &value ); msg->cursize += 4; msg->bit += 32; } else { Com_Error( ERR_DROP, "can't write %d bits", bits ); } } else { value &= (0xffffffff >> (32 - bits)); if ( bits&7 ) { int nbits; nbits = bits&7; if ( msg->bit + nbits > msg->maxsize << 3 ) { msg->overflowed = qtrue; return; } for( i = 0; i < nbits; i++ ) { Huff_putBit( (value & 1), 
msg->data, &msg->bit ); value = (value >> 1); } bits = bits - nbits; } if ( bits ) { for( i = 0; i < bits; i += 8 ) { Huff_offsetTransmit( &msgHuff.compressor, (value & 0xff), msg->data, &msg->bit, msg->maxsize << 3 ); value = (value >> 8); if ( msg->bit > msg->maxsize << 3 ) { msg->overflowed = qtrue; return; } } } msg->cursize = (msg->bit >> 3) + 1; } } int MSG_ReadBits( msg_t *msg, int bits ) { int value; int get; qboolean sgn; int i, nbits; // FILE* fp; if ( msg->readcount > msg->cursize ) { return 0; } value = 0; if ( bits < 0 ) { bits = -bits; sgn = qtrue; } else { sgn = qfalse; } if (msg->oob) { if (msg->readcount + (bits>>3) > msg->cursize) { msg->readcount = msg->cursize + 1; return 0; } if(bits==8) { value = msg->data[msg->readcount]; msg->readcount += 1; msg->bit += 8; } else if(bits==16) { short temp; CopyLittleShort(&temp, &msg->data[msg->readcount]); value = temp; msg->readcount += 2; msg->bit += 16; } else if(bits==32) { CopyLittleLong(&value, &msg->data[msg->readcount]); msg->readcount += 4; msg->bit += 32; } else Com_Error(ERR_DROP, "can't read %d bits", bits); } else { nbits = 0; if (bits&7) { nbits = bits&7; if (msg->bit + nbits > msg->cursize << 3) { msg->readcount = msg->cursize + 1; return 0; } for(i=0;i<nbits;i++) { value |= (Huff_getBit(msg->data, &msg->bit)<<i); } bits = bits - nbits; } if (bits) { // fp = fopen("c:\\netchan.bin", "a"); for(i=0;i<bits;i+=8) { Huff_offsetReceive (msgHuff.decompressor.tree, &get, msg->data, &msg->bit, msg->cursize<<3); // fwrite(&get, 1, 1, fp); value |= (get<<(i+nbits)); if (msg->bit > msg->cursize<<3) { msg->readcount = msg->cursize + 1; return 0; } } // fclose(fp); } msg->readcount = (msg->bit>>3)+1; } if ( sgn && bits > 0 && bits < 32 ) { if ( value & ( 1 << ( bits - 1 ) ) ) { value |= -1 ^ ( ( 1 << bits ) - 1 ); } } return value; } //================================================================================ // // writing functions // void MSG_WriteChar( msg_t *sb, int c ) { #ifdef PARANOID if (c < -128 || c > 127) Com_Error (ERR_FATAL, "MSG_WriteChar: range error"); #endif MSG_WriteBits( sb, c, 8 ); } void MSG_WriteByte( msg_t *sb, int c ) { #ifdef PARANOID if (c < 0 || c > 255) Com_Error (ERR_FATAL, "MSG_WriteByte: range error"); #endif MSG_WriteBits( sb, c, 8 ); } void MSG_WriteData( msg_t *buf, const void *data, int length ) { int i; for(i=0;i<length;i++) { MSG_WriteByte(buf, ((byte *)data)[i]); } } void MSG_WriteShort( msg_t *sb, int c ) { #ifdef PARANOID if (c < ((short)0x8000) || c > (short)0x7fff) Com_Error (ERR_FATAL, "MSG_WriteShort: range error"); #endif MSG_WriteBits( sb, c, 16 ); } void MSG_WriteLong( msg_t *sb, int c ) { MSG_WriteBits( sb, c, 32 ); } void MSG_WriteFloat( msg_t *sb, float f ) { floatint_t dat; dat.f = f; MSG_WriteBits( sb, dat.i, 32 ); } void MSG_WriteString( msg_t *sb, const char *s ) { if ( !s ) { MSG_WriteData (sb, "", 1); } else { int l,i; char string[MAX_STRING_CHARS]; l = strlen( s ); if ( l >= MAX_STRING_CHARS ) { Com_Printf( "MSG_WriteString: MAX_STRING_CHARS" ); MSG_WriteData (sb, "", 1); return; } Q_strncpyz( string, s, sizeof( string ) ); // get rid of 0x80+ and '%' chars, because old clients don't like them for ( i = 0 ; i < l ; i++ ) { if ( ((byte *)string)[i] > 127 || string[i] == '%' ) { string[i] = '.'; } } MSG_WriteData (sb, string, l+1); } } void MSG_WriteBigString( msg_t *sb, const char *s ) { if ( !s ) { MSG_WriteData (sb, "", 1); } else { int l,i; char string[BIG_INFO_STRING]; l = strlen( s ); if ( l >= BIG_INFO_STRING ) { Com_Printf( "MSG_WriteBigString: BIG_INFO_STRING" );
MSG_WriteData (sb, "", 1); return; } Q_strncpyz( string, s, sizeof( string ) ); // get rid of 0x80+ and '%' chars, because old clients don't like them for ( i = 0 ; i < l ; i++ ) { if ( ((byte *)string)[i] > 127 || string[i] == '%' ) { string[i] = '.'; } } MSG_WriteData (sb, string, l+1); } } void MSG_WriteAngle( msg_t *sb, float f ) { MSG_WriteByte (sb, (int)(f*256/360) & 255); } void MSG_WriteAngle16( msg_t *sb, float f ) { MSG_WriteShort (sb, ANGLE2SHORT(f)); } //============================================================ // // reading functions // // returns -1 if no more characters are available int MSG_ReadChar (msg_t *msg ) { int c; c = (signed char)MSG_ReadBits( msg, 8 ); if ( msg->readcount > msg->cursize ) { c = -1; } return c; } int MSG_ReadByte( msg_t *msg ) { int c; c = (unsigned char)MSG_ReadBits( msg, 8 ); if ( msg->readcount > msg->cursize ) { c = -1; } return c; } int MSG_LookaheadByte( msg_t *msg ) { const int bloc = Huff_getBloc(); const int readcount = msg->readcount; const int bit = msg->bit; int c = MSG_ReadByte(msg); Huff_setBloc(bloc); msg->readcount = readcount; msg->bit = bit; return c; } int MSG_ReadShort( msg_t *msg ) { int c; c = (short)MSG_ReadBits( msg, 16 ); if ( msg->readcount > msg->cursize ) { c = -1; } return c; } int MSG_ReadLong( msg_t *msg ) { int c; c = MSG_ReadBits( msg, 32 ); if ( msg->readcount > msg->cursize ) { c = -1; } return c; } float MSG_ReadFloat( msg_t *msg ) { floatint_t dat; dat.i = MSG_ReadBits( msg, 32 ); if ( msg->readcount > msg->cursize ) { dat.f = -1; } return dat.f; } char *MSG_ReadString( msg_t *msg ) { static char string[MAX_STRING_CHARS]; int l,c; l = 0; do { c = MSG_ReadByte(msg); // use ReadByte so -1 is out of bounds if ( c == -1 || c == 0 ) { break; } // translate all fmt spec to avoid crash bugs if ( c == '%' ) { c = '.'; } // don't allow higher ascii values if ( c > 127 ) { c = '.'; } string[l] = c; l++; } while (l < sizeof(string)-1); string[l] = 0; return string; } char *MSG_ReadBigString( msg_t *msg ) { static char string[BIG_INFO_STRING]; int l,c; l = 0; do { c = MSG_ReadByte(msg); // use ReadByte so -1 is out of bounds if ( c == -1 || c == 0 ) { break; } // translate all fmt spec to avoid crash bugs if ( c == '%' ) { c = '.'; } // don't allow higher ascii values if ( c > 127 ) { c = '.'; } string[l] = c; l++; } while (l < sizeof(string)-1); string[l] = 0; return string; } char *MSG_ReadStringLine( msg_t *msg ) { static char string[MAX_STRING_CHARS]; int l,c; l = 0; do { c = MSG_ReadByte(msg); // use ReadByte so -1 is out of bounds if (c == -1 || c == 0 || c == '\n') { break; } // translate all fmt spec to avoid crash bugs if ( c == '%' ) { c = '.'; } // don't allow higher ascii values if ( c > 127 ) { c = '.'; } string[l] = c; l++; } while (l < sizeof(string)-1); string[l] = 0; return string; } float MSG_ReadAngle16( msg_t *msg ) { return SHORT2ANGLE(MSG_ReadShort(msg)); } void MSG_ReadData( msg_t *msg, void *data, int len ) { int i; for (i=0 ; i<len ; i++) { ((byte *)data)[i] = MSG_ReadByte (msg); } } // a string hasher which gives the same hash value even if the // string is later modified via the legacy MSG read/write code int MSG_HashKey(const char *string, int maxlen) { int hash, i; hash = 0; for (i = 0; i < maxlen && string[i] != '\0'; i++) { if (string[i] & 0x80 || string[i] == '%') hash += '.' 
* (119 + i); else hash += string[i] * (119 + i); } hash = (hash ^ (hash >> 10) ^ (hash >> 20)); return hash; } /* ============================================================================= delta functions ============================================================================= */ extern cvar_t *cl_shownet; #define LOG(x) if( cl_shownet && cl_shownet->integer == 4 ) { Com_Printf("%s ", x ); }; void MSG_WriteDelta( msg_t *msg, int oldV, int newV, int bits ) { if ( oldV == newV ) { MSG_WriteBits( msg, 0, 1 ); return; } MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, newV, bits ); } int MSG_ReadDelta( msg_t *msg, int oldV, int bits ) { if ( MSG_ReadBits( msg, 1 ) ) { return MSG_ReadBits( msg, bits ); } return oldV; } void MSG_WriteDeltaFloat( msg_t *msg, float oldV, float newV ) { floatint_t fi; if ( oldV == newV ) { MSG_WriteBits( msg, 0, 1 ); return; } fi.f = newV; MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, fi.i, 32 ); } float MSG_ReadDeltaFloat( msg_t *msg, float oldV ) { if ( MSG_ReadBits( msg, 1 ) ) { floatint_t fi; fi.i = MSG_ReadBits( msg, 32 ); return fi.f; } return oldV; } /* ============================================================================= delta functions with keys ============================================================================= */ int kbitmask[32] = { 0x00000001, 0x00000003, 0x00000007, 0x0000000F, 0x0000001F, 0x0000003F, 0x0000007F, 0x000000FF, 0x000001FF, 0x000003FF, 0x000007FF, 0x00000FFF, 0x00001FFF, 0x00003FFF, 0x00007FFF, 0x0000FFFF, 0x0001FFFF, 0x0003FFFF, 0x0007FFFF, 0x000FFFFF, 0x001FFFFf, 0x003FFFFF, 0x007FFFFF, 0x00FFFFFF, 0x01FFFFFF, 0x03FFFFFF, 0x07FFFFFF, 0x0FFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, }; void MSG_WriteDeltaKey( msg_t *msg, int key, int oldV, int newV, int bits ) { if ( oldV == newV ) { MSG_WriteBits( msg, 0, 1 ); return; } MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, newV ^ key, bits ); } int MSG_ReadDeltaKey( msg_t *msg, int key, int oldV, int bits ) { if ( MSG_ReadBits( msg, 1 ) ) { return MSG_ReadBits( msg, bits ) ^ (key & kbitmask[ bits - 1 ]); } return oldV; } void MSG_WriteDeltaKeyFloat( msg_t *msg, int key, float oldV, float newV ) { floatint_t fi; if ( oldV == newV ) { MSG_WriteBits( msg, 0, 1 ); return; } fi.f = newV; MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, fi.i ^ key, 32 ); } float MSG_ReadDeltaKeyFloat( msg_t *msg, int key, float oldV ) { if ( MSG_ReadBits( msg, 1 ) ) { floatint_t fi; fi.i = MSG_ReadBits( msg, 32 ) ^ key; return fi.f; } return oldV; } /* ============================================================================ usercmd_t communication ============================================================================ */ /* ===================== MSG_WriteDeltaUsercmdKey ===================== */ void MSG_WriteDeltaUsercmdKey( msg_t *msg, int key, usercmd_t *from, usercmd_t *to ) { if ( to->serverTime - from->serverTime < 256 ) { MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, to->serverTime - from->serverTime, 8 ); } else { MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, to->serverTime, 32 ); } if (from->angles[0] == to->angles[0] && from->angles[1] == to->angles[1] && from->angles[2] == to->angles[2] && from->forwardmove == to->forwardmove && from->rightmove == to->rightmove && from->upmove == to->upmove && from->buttons == to->buttons && from->weapon == to->weapon) { MSG_WriteBits( msg, 0, 1 ); // no change oldsize += 7; return; } key ^= to->serverTime; MSG_WriteBits( msg, 1, 1 ); MSG_WriteDeltaKey( msg, key, from->angles[0], to->angles[0], 16 ); MSG_WriteDeltaKey( 
msg, key, from->angles[1], to->angles[1], 16 ); MSG_WriteDeltaKey( msg, key, from->angles[2], to->angles[2], 16 ); MSG_WriteDeltaKey( msg, key, from->forwardmove, to->forwardmove, 8 ); MSG_WriteDeltaKey( msg, key, from->rightmove, to->rightmove, 8 ); MSG_WriteDeltaKey( msg, key, from->upmove, to->upmove, 8 ); MSG_WriteDeltaKey( msg, key, from->buttons, to->buttons, 16 ); MSG_WriteDeltaKey( msg, key, from->weapon, to->weapon, 8 ); } /* ===================== MSG_ReadDeltaUsercmdKey ===================== */ void MSG_ReadDeltaUsercmdKey( msg_t *msg, int key, usercmd_t *from, usercmd_t *to ) { if ( MSG_ReadBits( msg, 1 ) ) { to->serverTime = from->serverTime + MSG_ReadBits( msg, 8 ); } else { to->serverTime = MSG_ReadBits( msg, 32 ); } if ( MSG_ReadBits( msg, 1 ) ) { key ^= to->serverTime; to->angles[0] = MSG_ReadDeltaKey( msg, key, from->angles[0], 16); to->angles[1] = MSG_ReadDeltaKey( msg, key, from->angles[1], 16); to->angles[2] = MSG_ReadDeltaKey( msg, key, from->angles[2], 16); to->forwardmove = MSG_ReadDeltaKey( msg, key, from->forwardmove, 8); if( to->forwardmove == -128 ) to->forwardmove = -127; to->rightmove = MSG_ReadDeltaKey( msg, key, from->rightmove, 8); if( to->rightmove == -128 ) to->rightmove = -127; to->upmove = MSG_ReadDeltaKey( msg, key, from->upmove, 8); if( to->upmove == -128 ) to->upmove = -127; to->buttons = MSG_ReadDeltaKey( msg, key, from->buttons, 16); to->weapon = MSG_ReadDeltaKey( msg, key, from->weapon, 8); } else { to->angles[0] = from->angles[0]; to->angles[1] = from->angles[1]; to->angles[2] = from->angles[2]; to->forwardmove = from->forwardmove; to->rightmove = from->rightmove; to->upmove = from->upmove; to->buttons = from->buttons; to->weapon = from->weapon; } } /* ============================================================================= entityState_t communication ============================================================================= */ /* ================= MSG_ReportChangeVectors_f Prints out a table from the current statistics for copying to code ================= */ void MSG_ReportChangeVectors_f( void ) { int i; for(i=0;i<256;i++) { if (pcount[i]) { Com_Printf("%d used %d\n", i, pcount[i]); } } } typedef struct { char *name; int offset; int bits; // 0 = float } netField_t; // using the stringizing operator to save typing... 
#define NETF(x) #x,(size_t)&((entityState_t*)0)->x netField_t entityStateFields[] = { { NETF(pos.trTime), 32 }, { NETF(pos.trBase[0]), 0 }, { NETF(pos.trBase[1]), 0 }, { NETF(pos.trDelta[0]), 0 }, { NETF(pos.trDelta[1]), 0 }, { NETF(pos.trBase[2]), 0 }, { NETF(apos.trBase[1]), 0 }, { NETF(pos.trDelta[2]), 0 }, { NETF(apos.trBase[0]), 0 }, { NETF(event), 10 }, { NETF(angles2[1]), 0 }, { NETF(eType), 8 }, { NETF(torsoAnim), 8 }, { NETF(eventParm), 8 }, { NETF(legsAnim), 8 }, { NETF(groundEntityNum), GENTITYNUM_BITS }, { NETF(pos.trType), 8 }, { NETF(eFlags), 19 }, { NETF(otherEntityNum), GENTITYNUM_BITS }, { NETF(weapon), 8 }, { NETF(clientNum), 8 }, { NETF(angles[1]), 0 }, { NETF(pos.trDuration), 32 }, { NETF(apos.trType), 8 }, { NETF(origin[0]), 0 }, { NETF(origin[1]), 0 }, { NETF(origin[2]), 0 }, { NETF(solid), 24 }, { NETF(powerups), MAX_POWERUPS }, { NETF(modelindex), 8 }, { NETF(otherEntityNum2), GENTITYNUM_BITS }, { NETF(loopSound), 8 }, { NETF(generic1), 8 }, { NETF(origin2[2]), 0 }, { NETF(origin2[0]), 0 }, { NETF(origin2[1]), 0 }, { NETF(modelindex2), 8 }, { NETF(angles[0]), 0 }, { NETF(time), 32 }, { NETF(apos.trTime), 32 }, { NETF(apos.trDuration), 32 }, { NETF(apos.trBase[2]), 0 }, { NETF(apos.trDelta[0]), 0 }, { NETF(apos.trDelta[1]), 0 }, { NETF(apos.trDelta[2]), 0 }, { NETF(time2), 32 }, { NETF(angles[2]), 0 }, { NETF(angles2[0]), 0 }, { NETF(angles2[2]), 0 }, { NETF(constantLight), 32 }, { NETF(frame), 16 } }; // if (int)f == f and (int)f + ( 1<<(FLOAT_INT_BITS-1) ) < ( 1 << FLOAT_INT_BITS ) // the float will be sent with FLOAT_INT_BITS, otherwise all 32 bits will be sent #define FLOAT_INT_BITS 13 #define FLOAT_INT_BIAS (1<<(FLOAT_INT_BITS-1)) /* ================== MSG_WriteDeltaEntity Writes part of a packetentities message, including the entity number. Can delta from either a baseline or a previous packet_entity If to is NULL, a remove entity update will be sent If force is not set, then nothing at all will be generated if the entity is identical, under the assumption that the in-order delta code will catch it. 
================== */ void MSG_WriteDeltaEntity( msg_t *msg, struct entityState_s *from, struct entityState_s *to, qboolean force ) { int i, lc; int numFields; netField_t *field; int trunc; float fullFloat; int *fromF, *toF; numFields = ARRAY_LEN( entityStateFields ); // all fields should be 32 bits to avoid any compiler packing issues // the "number" field is not part of the field list // if this assert fails, someone added a field to the entityState_t // struct without updating the message fields assert( numFields + 1 == sizeof( *from )/4 ); // a NULL to is a delta remove message if ( to == NULL ) { if ( from == NULL ) { return; } MSG_WriteBits( msg, from->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 1, 1 ); return; } if ( to->number < 0 || to->number >= MAX_GENTITIES ) { Com_Error (ERR_FATAL, "MSG_WriteDeltaEntity: Bad entity number: %i", to->number ); } lc = 0; // build the change vector as bytes so it is endian independent for ( i = 0, field = entityStateFields ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF != *toF ) { lc = i+1; } } if ( lc == 0 ) { // nothing at all changed if ( !force ) { return; // nothing at all } // write two bits for no change MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 0, 1 ); // no delta return; } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 1, 1 ); // we have a delta MSG_WriteByte( msg, lc ); // # of changes oldsize += numFields; for ( i = 0, field = entityStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF == *toF ) { MSG_WriteBits( msg, 0, 1 ); // no change continue; } MSG_WriteBits( msg, 1, 1 ); // changed if ( field->bits == 0 ) { // float fullFloat = *(float *)toF; trunc = (int)fullFloat; if (fullFloat == 0.0f) { MSG_WriteBits( msg, 0, 1 ); oldsize += FLOAT_INT_BITS; } else { MSG_WriteBits( msg, 1, 1 ); if ( trunc == fullFloat && trunc + FLOAT_INT_BIAS >= 0 && trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) { // send as small integer MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, trunc + FLOAT_INT_BIAS, FLOAT_INT_BITS ); } else { // send as full floating point value MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, 32 ); } } } else { if (*toF == 0) { MSG_WriteBits( msg, 0, 1 ); } else { MSG_WriteBits( msg, 1, 1 ); // integer MSG_WriteBits( msg, *toF, field->bits ); } } } } /* ================== MSG_ReadDeltaEntity The entity number has already been read from the message, which is how the from state is identified.
If the delta removes the entity, entityState_t->number will be set to MAX_GENTITIES-1 Can go from either a baseline or a previous packet_entity ================== */ void MSG_ReadDeltaEntity( msg_t *msg, entityState_t *from, entityState_t *to, int number) { int i, lc; int numFields; netField_t *field; int *fromF, *toF; int print; int trunc; int startBit, endBit; if ( number < 0 || number >= MAX_GENTITIES) { Com_Error( ERR_DROP, "Bad delta entity number: %i", number ); } if ( msg->bit == 0 ) { startBit = msg->readcount * 8 - GENTITYNUM_BITS; } else { startBit = ( msg->readcount - 1 ) * 8 + msg->bit - GENTITYNUM_BITS; } // check for a remove if ( MSG_ReadBits( msg, 1 ) == 1 ) { Com_Memset( to, 0, sizeof( *to ) ); to->number = MAX_GENTITIES - 1; if ( cl_shownet && ( cl_shownet->integer >= 2 || cl_shownet->integer == -1 ) ) { Com_Printf( "%3i: #%-3i remove\n", msg->readcount, number ); } return; } // check for no delta if ( MSG_ReadBits( msg, 1 ) == 0 ) { *to = *from; to->number = number; return; } numFields = ARRAY_LEN( entityStateFields ); lc = MSG_ReadByte(msg); if ( lc > numFields || lc < 0 ) { Com_Error( ERR_DROP, "invalid entityState field count" ); } // shownet 2/3 will interleave with other printed info, -1 will // just print the delta records if ( cl_shownet && ( cl_shownet->integer >= 2 || cl_shownet->integer == -1 ) ) { print = 1; Com_Printf( "%3i: #%-3i ", msg->readcount, to->number ); } else { print = 0; } to->number = number; for ( i = 0, field = entityStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( ! MSG_ReadBits( msg, 1 ) ) { // no change *toF = *fromF; } else { if ( field->bits == 0 ) { // float if ( MSG_ReadBits( msg, 1 ) == 0 ) { *(float *)toF = 0.0f; } else { if ( MSG_ReadBits( msg, 1 ) == 0 ) { // integral float trunc = MSG_ReadBits( msg, FLOAT_INT_BITS ); // bias to allow equal parts positive and negative trunc -= FLOAT_INT_BIAS; *(float *)toF = trunc; if ( print ) { Com_Printf( "%s:%i ", field->name, trunc ); } } else { // full floating point value *toF = MSG_ReadBits( msg, 32 ); if ( print ) { Com_Printf( "%s:%f ", field->name, *(float *)toF ); } } } } else { if ( MSG_ReadBits( msg, 1 ) == 0 ) { *toF = 0; } else { // integer *toF = MSG_ReadBits( msg, field->bits ); if ( print ) { Com_Printf( "%s:%i ", field->name, *toF ); } } } // pcount[i]++; } } for ( i = lc, field = &entityStateFields[lc] ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); // no change *toF = *fromF; } if ( print ) { if ( msg->bit == 0 ) { endBit = msg->readcount * 8 - GENTITYNUM_BITS; } else { endBit = ( msg->readcount - 1 ) * 8 + msg->bit - GENTITYNUM_BITS; } Com_Printf( " (%i bits)\n", endBit - startBit ); } } /* ============================================================================ player_state_t communication ============================================================================ */ // using the stringizing operator to save typing...
#define PSF(x) #x,(size_t)&((playerState_t*)0)->x netField_t playerStateFields[] = { { PSF(commandTime), 32 }, { PSF(origin[0]), 0 }, { PSF(origin[1]), 0 }, { PSF(bobCycle), 8 }, { PSF(velocity[0]), 0 }, { PSF(velocity[1]), 0 }, { PSF(viewangles[1]), 0 }, { PSF(viewangles[0]), 0 }, { PSF(weaponTime), -16 }, { PSF(origin[2]), 0 }, { PSF(velocity[2]), 0 }, { PSF(legsTimer), 8 }, { PSF(pm_time), -16 }, { PSF(eventSequence), 16 }, { PSF(torsoAnim), 8 }, { PSF(movementDir), 4 }, { PSF(events[0]), 8 }, { PSF(legsAnim), 8 }, { PSF(events[1]), 8 }, { PSF(pm_flags), 16 }, { PSF(groundEntityNum), GENTITYNUM_BITS }, { PSF(weaponstate), 4 }, { PSF(eFlags), 16 }, { PSF(externalEvent), 10 }, { PSF(gravity), 16 }, { PSF(speed), 16 }, { PSF(delta_angles[1]), 16 }, { PSF(externalEventParm), 8 }, { PSF(viewheight), -8 }, { PSF(damageEvent), 8 }, { PSF(damageYaw), 8 }, { PSF(damagePitch), 8 }, { PSF(damageCount), 8 }, { PSF(generic1), 8 }, { PSF(pm_type), 8 }, { PSF(delta_angles[0]), 16 }, { PSF(delta_angles[2]), 16 }, { PSF(torsoTimer), 12 }, { PSF(eventParms[0]), 8 }, { PSF(eventParms[1]), 8 }, { PSF(clientNum), 8 }, { PSF(weapon), 5 }, { PSF(viewangles[2]), 0 }, { PSF(grapplePoint[0]), 0 }, { PSF(grapplePoint[1]), 0 }, { PSF(grapplePoint[2]), 0 }, { PSF(jumppad_ent), GENTITYNUM_BITS }, { PSF(loopSound), 16 } }; /* ============= MSG_WriteDeltaPlayerstate ============= */ void MSG_WriteDeltaPlayerstate( msg_t *msg, struct playerState_s *from, struct playerState_s *to ) { int i; playerState_t dummy; int statsbits; int persistantbits; int ammobits; int powerupbits; int numFields; netField_t *field; int *fromF, *toF; float fullFloat; int trunc, lc; if (!from) { from = &dummy; Com_Memset (&dummy, 0, sizeof(dummy)); } numFields = ARRAY_LEN( playerStateFields ); lc = 0; for ( i = 0, field = playerStateFields ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF != *toF ) { lc = i+1; } } MSG_WriteByte( msg, lc ); // # of changes oldsize += numFields - lc; for ( i = 0, field = playerStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF == *toF ) { MSG_WriteBits( msg, 0, 1 ); // no change continue; } MSG_WriteBits( msg, 1, 1 ); // changed // pcount[i]++; if ( field->bits == 0 ) { // float fullFloat = *(float *)toF; trunc = (int)fullFloat; if ( trunc == fullFloat && trunc + FLOAT_INT_BIAS >= 0 && trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) { // send as small integer MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, trunc + FLOAT_INT_BIAS, FLOAT_INT_BITS ); } else { // send as full floating point value MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, 32 ); } } else { // integer MSG_WriteBits( msg, *toF, field->bits ); } } // // send the arrays // statsbits = 0; for (i=0 ; i<MAX_STATS ; i++) { if (to->stats[i] != from->stats[i]) { statsbits |= 1<<i; } } persistantbits = 0; for (i=0 ; i<MAX_PERSISTANT ; i++) { if (to->persistant[i] != from->persistant[i]) { persistantbits |= 1<<i; } } ammobits = 0; for (i=0 ; i<MAX_WEAPONS ; i++) { if (to->ammo[i] != from->ammo[i]) { ammobits |= 1<<i; } } powerupbits = 0; for (i=0 ; i<MAX_POWERUPS ; i++) { if (to->powerups[i] != from->powerups[i]) { powerupbits |= 1<<i; } } if (!statsbits && !persistantbits && !ammobits && !powerupbits) { MSG_WriteBits( msg, 0, 1 ); // no change oldsize += 4; return; } MSG_WriteBits( msg, 1, 1 ); // changed if ( statsbits ) { MSG_WriteBits( msg, 1, 1 ); // changed 
MSG_WriteBits( msg, statsbits, MAX_STATS ); for (i=0 ; i<MAX_STATS ; i++) if (statsbits & (1<<i) ) MSG_WriteShort (msg, to->stats[i]); } else { MSG_WriteBits( msg, 0, 1 ); // no change } if ( persistantbits ) { MSG_WriteBits( msg, 1, 1 ); // changed MSG_WriteBits( msg, persistantbits, MAX_PERSISTANT ); for (i=0 ; i<MAX_PERSISTANT ; i++) if (persistantbits & (1<<i) ) MSG_WriteShort (msg, to->persistant[i]); } else { MSG_WriteBits( msg, 0, 1 ); // no change } if ( ammobits ) { MSG_WriteBits( msg, 1, 1 ); // changed MSG_WriteBits( msg, ammobits, MAX_WEAPONS ); for (i=0 ; i<MAX_WEAPONS ; i++) if (ammobits & (1<<i) ) MSG_WriteShort (msg, to->ammo[i]); } else { MSG_WriteBits( msg, 0, 1 ); // no change } if ( powerupbits ) { MSG_WriteBits( msg, 1, 1 ); // changed MSG_WriteBits( msg, powerupbits, MAX_POWERUPS ); for (i=0 ; i<MAX_POWERUPS ; i++) if (powerupbits & (1<<i) ) MSG_WriteLong( msg, to->powerups[i] ); } else { MSG_WriteBits( msg, 0, 1 ); // no change } } /* =================== MSG_ReadDeltaPlayerstate =================== */ void MSG_ReadDeltaPlayerstate (msg_t *msg, playerState_t *from, playerState_t *to ) { int i, lc; int bits; netField_t *field; int numFields; int startBit, endBit; int print; int *fromF, *toF; int trunc; playerState_t dummy; if ( !from ) { from = &dummy; Com_Memset( &dummy, 0, sizeof( dummy ) ); } *to = *from; if ( msg->bit == 0 ) { startBit = msg->readcount * 8 - GENTITYNUM_BITS; } else { startBit = ( msg->readcount - 1 ) * 8 + msg->bit - GENTITYNUM_BITS; } // shownet 2/3 will interleave with other printed info, -2 will // just print the delta records if ( cl_shownet && ( cl_shownet->integer >= 2 || cl_shownet->integer == -2 ) ) { print = 1; Com_Printf( "%3i: playerstate ", msg->readcount ); } else { print = 0; } numFields = ARRAY_LEN( playerStateFields ); lc = MSG_ReadByte(msg); if ( lc > numFields || lc < 0 ) { Com_Error( ERR_DROP, "invalid playerState field count" ); } for ( i = 0, field = playerStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( ! 
MSG_ReadBits( msg, 1 ) ) { // no change *toF = *fromF; } else { if ( field->bits == 0 ) { // float if ( MSG_ReadBits( msg, 1 ) == 0 ) { // integral float trunc = MSG_ReadBits( msg, FLOAT_INT_BITS ); // bias to allow equal parts positive and negative trunc -= FLOAT_INT_BIAS; *(float *)toF = trunc; if ( print ) { Com_Printf( "%s:%i ", field->name, trunc ); } } else { // full floating point value *toF = MSG_ReadBits( msg, 32 ); if ( print ) { Com_Printf( "%s:%f ", field->name, *(float *)toF ); } } } else { // integer *toF = MSG_ReadBits( msg, field->bits ); if ( print ) { Com_Printf( "%s:%i ", field->name, *toF ); } } } } for ( i=lc,field = &playerStateFields[lc];i<numFields; i++, field++) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); // no change *toF = *fromF; } // read the arrays if (MSG_ReadBits( msg, 1 ) ) { // parse stats if ( MSG_ReadBits( msg, 1 ) ) { LOG("PS_STATS"); bits = MSG_ReadBits (msg, MAX_STATS); for (i=0 ; i<MAX_STATS ; i++) { if (bits & (1<<i) ) { to->stats[i] = MSG_ReadShort(msg); } } } // parse persistant stats if ( MSG_ReadBits( msg, 1 ) ) { LOG("PS_PERSISTANT"); bits = MSG_ReadBits (msg, MAX_PERSISTANT); for (i=0 ; i<MAX_PERSISTANT ; i++) { if (bits & (1<<i) ) { to->persistant[i] = MSG_ReadShort(msg); } } } // parse ammo if ( MSG_ReadBits( msg, 1 ) ) { LOG("PS_AMMO"); bits = MSG_ReadBits (msg, MAX_WEAPONS); for (i=0 ; i<MAX_WEAPONS ; i++) { if (bits & (1<<i) ) { to->ammo[i] = MSG_ReadShort(msg); } } } // parse powerups if ( MSG_ReadBits( msg, 1 ) ) { LOG("PS_POWERUPS"); bits = MSG_ReadBits (msg, MAX_POWERUPS); for (i=0 ; i<MAX_POWERUPS ; i++) { if (bits & (1<<i) ) { to->powerups[i] = MSG_ReadLong(msg); } } } } if ( print ) { if ( msg->bit == 0 ) { endBit = msg->readcount * 8 - GENTITYNUM_BITS; } else { endBit = ( msg->readcount - 1 ) * 8 + msg->bit - GENTITYNUM_BITS; } Com_Printf( " (%i bits)\n", endBit - startBit ); } } int msg_hData[256] = { 250315, // 0 41193, // 1 6292, // 2 7106, // 3 3730, // 4 3750, // 5 6110, // 6 23283, // 7 33317, // 8 6950, // 9 7838, // 10 9714, // 11 9257, // 12 17259, // 13 3949, // 14 1778, // 15 8288, // 16 1604, // 17 1590, // 18 1663, // 19 1100, // 20 1213, // 21 1238, // 22 1134, // 23 1749, // 24 1059, // 25 1246, // 26 1149, // 27 1273, // 28 4486, // 29 2805, // 30 3472, // 31 21819, // 32 1159, // 33 1670, // 34 1066, // 35 1043, // 36 1012, // 37 1053, // 38 1070, // 39 1726, // 40 888, // 41 1180, // 42 850, // 43 960, // 44 780, // 45 1752, // 46 3296, // 47 10630, // 48 4514, // 49 5881, // 50 2685, // 51 4650, // 52 3837, // 53 2093, // 54 1867, // 55 2584, // 56 1949, // 57 1972, // 58 940, // 59 1134, // 60 1788, // 61 1670, // 62 1206, // 63 5719, // 64 6128, // 65 7222, // 66 6654, // 67 3710, // 68 3795, // 69 1492, // 70 1524, // 71 2215, // 72 1140, // 73 1355, // 74 971, // 75 2180, // 76 1248, // 77 1328, // 78 1195, // 79 1770, // 80 1078, // 81 1264, // 82 1266, // 83 1168, // 84 965, // 85 1155, // 86 1186, // 87 1347, // 88 1228, // 89 1529, // 90 1600, // 91 2617, // 92 2048, // 93 2546, // 94 3275, // 95 2410, // 96 3585, // 97 2504, // 98 2800, // 99 2675, // 100 6146, // 101 3663, // 102 2840, // 103 14253, // 104 3164, // 105 2221, // 106 1687, // 107 3208, // 108 2739, // 109 3512, // 110 4796, // 111 4091, // 112 3515, // 113 5288, // 114 4016, // 115 7937, // 116 6031, // 117 5360, // 118 3924, // 119 4892, // 120 3743, // 121 4566, // 122 4807, // 123 5852, // 124 6400, // 125 6225, // 126 8291, // 127 23243, // 128 7838, // 129 7073, // 130 8935, // 131 
5437, // 132 4483, // 133 3641, // 134 5256, // 135 5312, // 136 5328, // 137 5370, // 138 3492, // 139 2458, // 140 1694, // 141 1821, // 142 2121, // 143 1916, // 144 1149, // 145 1516, // 146 1367, // 147 1236, // 148 1029, // 149 1258, // 150 1104, // 151 1245, // 152 1006, // 153 1149, // 154 1025, // 155 1241, // 156 952, // 157 1287, // 158 997, // 159 1713, // 160 1009, // 161 1187, // 162 879, // 163 1099, // 164 929, // 165 1078, // 166 951, // 167 1656, // 168 930, // 169 1153, // 170 1030, // 171 1262, // 172 1062, // 173 1214, // 174 1060, // 175 1621, // 176 930, // 177 1106, // 178 912, // 179 1034, // 180 892, // 181 1158, // 182 990, // 183 1175, // 184 850, // 185 1121, // 186 903, // 187 1087, // 188 920, // 189 1144, // 190 1056, // 191 3462, // 192 2240, // 193 4397, // 194 12136, // 195 7758, // 196 1345, // 197 1307, // 198 3278, // 199 1950, // 200 886, // 201 1023, // 202 1112, // 203 1077, // 204 1042, // 205 1061, // 206 1071, // 207 1484, // 208 1001, // 209 1096, // 210 915, // 211 1052, // 212 995, // 213 1070, // 214 876, // 215 1111, // 216 851, // 217 1059, // 218 805, // 219 1112, // 220 923, // 221 1103, // 222 817, // 223 1899, // 224 1872, // 225 976, // 226 841, // 227 1127, // 228 956, // 229 1159, // 230 950, // 231 7791, // 232 954, // 233 1289, // 234 933, // 235 1127, // 236 3207, // 237 1020, // 238 927, // 239 1355, // 240 768, // 241 1040, // 242 745, // 243 952, // 244 805, // 245 1073, // 246 740, // 247 1013, // 248 805, // 249 1008, // 250 796, // 251 996, // 252 1057, // 253 11457, // 254 13504, // 255 }; void MSG_initHuffman( void ) { int i,j; msgInit = qtrue; Huff_Init(&msgHuff); for(i=0;i<256;i++) { for (j=0;j<msg_hData[i];j++) { Huff_addRef(&msgHuff.compressor, (byte)i); // Do update Huff_addRef(&msgHuff.decompressor, (byte)i); // Do update } } } /* void MSG_NUinitHuffman() { byte *data; int size, i, ch; int array[256]; msgInit = qtrue; Huff_Init(&msgHuff); // load it in size = FS_ReadFile( "netchan/netchan.bin", (void **)&data ); for(i=0;i<256;i++) { array[i] = 0; } for(i=0;i<size;i++) { ch = data[i]; Huff_addRef(&msgHuff.compressor, ch); // Do update Huff_addRef(&msgHuff.decompressor, ch); // Do update array[ch]++; } Com_Printf("msg_hData {\n"); for(i=0;i<256;i++) { if (array[i] == 0) { Huff_addRef(&msgHuff.compressor, i); // Do update Huff_addRef(&msgHuff.decompressor, i); // Do update } Com_Printf("%d, // %d\n", array[i], i); } Com_Printf("};\n"); FS_FreeFile( data ); Cbuf_AddText( "condump dump.txt\n" ); } */ //===========================================================================
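/*
A minimal sketch, not from the original source: the integral-float test that
MSG_WriteDeltaEntity and MSG_WriteDeltaPlayerstate apply inline above.  A
float is sent as a FLOAT_INT_BITS-wide biased integer only when truncation is
lossless and the biased value fits in FLOAT_INT_BITS bits; otherwise all 32
bits go on the wire.  The helper name is hypothetical, and the block is kept
under #if 0 so it cannot affect a build.
*/
#if 0
static qboolean MSG_FloatFitsAsInt( float fullFloat ) {
	int trunc = (int)fullFloat;

	// lossless truncation, and trunc + FLOAT_INT_BIAS in [0, 1<<FLOAT_INT_BITS)
	if ( trunc == fullFloat
		&& trunc + FLOAT_INT_BIAS >= 0
		&& trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) {
		return qtrue;
	}
	return qfalse;
}
#endif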
./CrossVul/dataset_final_sorted/CWE-119/c/good_2593_1
crossvul-cpp_data_bad_5740_2
/* * NET3: Garbage Collector For AF_UNIX sockets * * Garbage Collector: * Copyright (C) Barak A. Pearlmutter. * Released under the GPL version 2 or later. * * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. * If it doesn't work blame me, it worked when Barak sent it. * * Assumptions: * * - object w/ a bit * - free list * * Current optimizations: * * - explicit stack instead of recursion * - tail recurse on first born instead of immediate push/pop * - we gather the stuff that should not be killed into tree * and stack is just a path from root to the current pointer. * * Future optimizations: * * - don't just push entire root set; process in place * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. * Cope with changing max_files. * Al Viro 11 Oct 1998 * Graph may have cycles. That is, we can send the descriptor * of foo to bar and vice versa. Current code chokes on that. * Fix: move SCM_RIGHTS ones into the separate list and then * skb_free() them all instead of doing explicit fput's. * Another problem: since fput() may block somebody may * create a new unix_socket when we are in the middle of sweep * phase. Fix: revert the logic wrt MARKED. Mark everything * upon the beginning and unmark non-junk ones. * * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS * sent to connect()'ed but still not accept()'ed sockets. * Fixed. Old code had slightly different problem here: * extra fput() in situation when we passed the descriptor via * such socket and closed it (descriptor). That would happen on * each unix_gc() until the accept(). Since the struct file in * question would go to the free list and might be reused... * That might be the reason of random oopses on filp_close() * in unrelated processes. * * AV 28 Feb 1999 * Kill the explicit allocation of stack. Now we keep the tree * with root in dummy + pointer (gc_current) to one of the nodes. * Stack is represented as path from gc_current to dummy. Unmark * now means "add to tree". Push == "make it a son of gc_current". * Pop == "move gc_current to parent". We keep only pointers to * parents (->gc_tree). * AV 1 Mar 1999 * Damn. Added missing check for ->dead in listen queues scanning. * * Miklos Szeredi 25 Jun 2007 * Reimplement with a cycle collecting algorithm. This should * solve several problems with the previous code, like being racy * wrt receive and holding up unrelated socket operations. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/sock.h> #include <net/af_unix.h> #include <net/scm.h> #include <net/tcp_states.h> /* Internal data structures and random procedures: */ static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = file_inode(filp); /* Socket ? 
*/ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { struct socket *sock = SOCKET_I(inode); struct sock *s = sock->sk; /* PF_UNIX ? */ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; } return u_sock; } /* Keep count of the number of times the file * descriptor is in flight, if it is for an AF_UNIX socket. */ void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); if (atomic_long_inc_return(&u->inflight) == 1) { BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &gc_inflight_list); } else { BUG_ON(list_empty(&u->link)); } unix_tot_inflight++; spin_unlock(&unix_gc_lock); } } void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; spin_unlock(&unix_gc_lock); } } static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { /* Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; /* Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* Get the socket the fd matches if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); if (sk) { struct unix_sock *u = unix_sk(sk); /* Ignore non-candidates, they could * have been added to the queues after * starting the garbage collection */ if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { hit = true; func(u); } } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); } static void scan_children(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { if (x->sk_state != TCP_LISTEN) { scan_inflight(x, func, hitlist); } else { struct sk_buff *skb; struct sk_buff *next; struct unix_sock *u; LIST_HEAD(embryos); /* For a listening socket collect the queued embryos * and perform a scan on them as well. */ spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { u = unix_sk(skb->sk); /* An embryo cannot be in-flight, so it's safe * to use the list link. */ BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &embryos); } spin_unlock(&x->sk_receive_queue.lock); while (!list_empty(&embryos)) { u = list_entry(embryos.next, struct unix_sock, link); scan_inflight(&u->sk, func, hitlist); list_del_init(&u->link); } } } static void dec_inflight(struct unix_sock *usk) { atomic_long_dec(&usk->inflight); } static void inc_inflight(struct unix_sock *usk) { atomic_long_inc(&usk->inflight); } static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* If this still might be part of a cycle, move it to the end * of the list, so that it's checked even if it was already * passed over */ if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) list_move_tail(&u->link, &gc_candidates); } static bool gc_in_progress; #define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { /* If the number of inflight sockets is insane, * force a garbage collect right now.
*/ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } /* The external entry point: unix_gc() */ void unix_gc(void) { struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; LIST_HEAD(not_cycle_list); spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. Since there are no possible receivers, all * buffers currently on the candidates' queues stay there * during the garbage collection. * * We also know that no new candidate can be added onto the * receive queues. Other, non candidate sockets _can_ be * added to queue, so we must make sure only to touch * candidates. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); } } /* Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &not_cycle_list); __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ while (!list_empty(&not_cycle_list)) { u = list_entry(not_cycle_list.next, struct unix_sock, link); __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); list_move_tail(&u->link, &gc_inflight_list); } /* Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; wake_up(&unix_gc_wait); out: spin_unlock(&unix_gc_lock); }
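/*
 * Editorial note: the descriptor-passing cycle this collector exists
 * to break is easy to reproduce from user space.  The sketch below is
 * illustrative only (hence guarded out with #if 0); it is a
 * stand-alone user-space program, not kernel code, and assumes only
 * standard POSIX sockets.  It sends one end of a socketpair into its
 * own receive queue via SCM_RIGHTS and then closes both descriptors,
 * so the in-flight reference is the only one left: exactly the
 * total_refs == inflight_refs condition checked in unix_gc() above.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	char data = 'x';
	char cbuf[CMSG_SPACE(sizeof(int))];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
		return 1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &data;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	/* sv[1]'s peer is sv[0], so sv[0]'s own descriptor lands in
	 * sv[0]'s receive queue: a one-socket reference cycle. */
	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));

	if (sendmsg(sv[1], &msg, 0) < 0)
		return 1;

	/* Drop both user references; only the in-flight one remains,
	 * and only unix_gc() can reclaim the socket now. */
	close(sv[0]);
	close(sv[1]);
	return 0;
}
#endif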
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5740_2
crossvul-cpp_data_bad_129_0
#include <math.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <assert.h> #include "lua.h" #include "lauxlib.h" #define LUACMSGPACK_NAME "cmsgpack" #define LUACMSGPACK_SAFE_NAME "cmsgpack_safe" #define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0" #define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo" #define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua" /* Allows a preprocessor directive to override MAX_NESTING */ #ifndef LUACMSGPACK_MAX_NESTING #define LUACMSGPACK_MAX_NESTING 16 /* Max tables nesting. */ #endif /* Check if float or double can be an integer without loss of precision */ #define IS_INT_TYPE_EQUIVALENT(x, T) (!isinf(x) && (T)(x) == (x)) #define IS_INT64_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int64_t) #define IS_INT_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int) /* If size of pointer is equal to a 4 byte integer, we're on 32 bits. */ #if UINTPTR_MAX == UINT_MAX #define BITS_32 1 #else #define BITS_32 0 #endif #if BITS_32 #define lua_pushunsigned(L, n) lua_pushnumber(L, n) #else #define lua_pushunsigned(L, n) lua_pushinteger(L, n) #endif /* ============================================================================= * MessagePack implementation and bindings for Lua 5.1/5.2. * Copyright(C) 2012 Salvatore Sanfilippo <antirez@gmail.com> * * http://github.com/antirez/lua-cmsgpack * * For MessagePack specification check the following web site: * http://wiki.msgpack.org/display/MSGPACK/Format+specification * * See Copyright Notice at the end of this file. * * CHANGELOG: * 19-Feb-2012 (ver 0.1.0): Initial release. * 20-Feb-2012 (ver 0.2.0): Tables encoding improved. * 20-Feb-2012 (ver 0.2.1): Minor bug fixing. * 20-Feb-2012 (ver 0.3.0): Module renamed lua-cmsgpack (was lua-msgpack). * 04-Apr-2014 (ver 0.3.1): Lua 5.2 support and minor bug fix. * 07-Apr-2014 (ver 0.4.0): Multiple pack/unpack, lua allocator, efficiency. * ========================================================================== */ /* -------------------------- Endian conversion -------------------------------- * We use it only for floats and doubles, all the other conversions performed * in an endian independent fashion. So the only thing we need is a function * that swaps a binary string if arch is little endian (and left it untouched * otherwise). */ /* Reverse memory bytes if arch is little endian. Given the conceptual * simplicity of the Lua build system we prefer check for endianess at runtime. * The performance difference should be acceptable. */ void memrevifle(void *ptr, size_t len) { unsigned char *p = (unsigned char *)ptr, *e = (unsigned char *)p+len-1, aux; int test = 1; unsigned char *testp = (unsigned char*) &test; if (testp[0] == 0) return; /* Big endian, nothing to do. */ len /= 2; while(len--) { aux = *p; *p = *e; *e = aux; p++; e--; } } /* ---------------------------- String buffer ---------------------------------- * This is a simple implementation of string buffers. The only operation * supported is creating empty buffers and appending bytes to it. * The string buffer uses 2x preallocation on every realloc for O(N) append * behavior. 
*/ typedef struct mp_buf { unsigned char *b; size_t len, free; } mp_buf; void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { void *(*local_realloc) (void *, void *, size_t osize, size_t nsize) = NULL; void *ud; local_realloc = lua_getallocf(L, &ud); return local_realloc(ud, target, osize, nsize); } mp_buf *mp_buf_new(lua_State *L) { mp_buf *buf = NULL; /* Old size = 0; new size = sizeof(*buf) */ buf = (mp_buf*)mp_realloc(L, NULL, 0, sizeof(*buf)); buf->b = NULL; buf->len = buf->free = 0; return buf; } void mp_buf_append(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { if (buf->free < len) { size_t newsize = (buf->len+len)*2; buf->b = (unsigned char*)mp_realloc(L, buf->b, buf->len + buf->free, newsize); buf->free = newsize - buf->len; } memcpy(buf->b+buf->len,s,len); buf->len += len; buf->free -= len; } void mp_buf_free(lua_State *L, mp_buf *buf) { mp_realloc(L, buf->b, buf->len + buf->free, 0); /* realloc to 0 = free */ mp_realloc(L, buf, sizeof(*buf), 0); } /* ---------------------------- String cursor ---------------------------------- * This simple data structure is used for parsing. Basically you create a cursor * using a string pointer and a length, then it is possible to access the * current string position with cursor->p, check the remaining length * in cursor->left, and finally consume more string using * mp_cur_consume(cursor,len), to advance 'p' and subtract 'left'. * An additional field cursor->error is set to zero on initialization and can * be used to report errors. */ #define MP_CUR_ERROR_NONE 0 #define MP_CUR_ERROR_EOF 1 /* Not enough data to complete operation. */ #define MP_CUR_ERROR_BADFMT 2 /* Bad data format */ typedef struct mp_cur { const unsigned char *p; size_t left; int err; } mp_cur; void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { cursor->p = s; cursor->left = len; cursor->err = MP_CUR_ERROR_NONE; } #define mp_cur_consume(_c,_len) do { _c->p += _len; _c->left -= _len; } while(0) /* When there is not enough room we set an error in the cursor and return. This * is very common across the code so we have a macro to make the code look * a bit simpler. */ #define mp_cur_need(_c,_len) do { \ if (_c->left < _len) { \ _c->err = MP_CUR_ERROR_EOF; \ return; \ } \ } while(0) /* ------------------------- Low level MP encoding -------------------------- */ void mp_encode_bytes(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { unsigned char hdr[5]; int hdrlen; if (len < 32) { hdr[0] = 0xa0 | (len&0xff); /* fix raw */ hdrlen = 1; } else if (len <= 0xff) { hdr[0] = 0xd9; hdr[1] = len; hdrlen = 2; } else if (len <= 0xffff) { hdr[0] = 0xda; hdr[1] = (len&0xff00)>>8; hdr[2] = len&0xff; hdrlen = 3; } else { hdr[0] = 0xdb; hdr[1] = (len&0xff000000)>>24; hdr[2] = (len&0xff0000)>>16; hdr[3] = (len&0xff00)>>8; hdr[4] = len&0xff; hdrlen = 5; } mp_buf_append(L,buf,hdr,hdrlen); mp_buf_append(L,buf,s,len); } /* we assume IEEE 754 internal format for single and double precision floats. 
*/ void mp_encode_double(lua_State *L, mp_buf *buf, double d) { unsigned char b[9]; float f = d; assert(sizeof(f) == 4 && sizeof(d) == 8); if (d == (double)f) { b[0] = 0xca; /* float IEEE 754 */ memcpy(b+1,&f,4); memrevifle(b+1,4); mp_buf_append(L,buf,b,5); } else if (sizeof(d) == 8) { b[0] = 0xcb; /* double IEEE 754 */ memcpy(b+1,&d,8); memrevifle(b+1,8); mp_buf_append(L,buf,b,9); } } void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[9]; int enclen; if (n >= 0) { if (n <= 127) { b[0] = n & 0x7f; /* positive fixnum */ enclen = 1; } else if (n <= 0xff) { b[0] = 0xcc; /* uint 8 */ b[1] = n & 0xff; enclen = 2; } else if (n <= 0xffff) { b[0] = 0xcd; /* uint 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else if (n <= 0xffffffffLL) { b[0] = 0xce; /* uint 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } else { b[0] = 0xcf; /* uint 64 */ b[1] = (n & 0xff00000000000000LL) >> 56; b[2] = (n & 0xff000000000000LL) >> 48; b[3] = (n & 0xff0000000000LL) >> 40; b[4] = (n & 0xff00000000LL) >> 32; b[5] = (n & 0xff000000) >> 24; b[6] = (n & 0xff0000) >> 16; b[7] = (n & 0xff00) >> 8; b[8] = n & 0xff; enclen = 9; } } else { if (n >= -32) { b[0] = ((signed char)n); /* negative fixnum */ enclen = 1; } else if (n >= -128) { b[0] = 0xd0; /* int 8 */ b[1] = n & 0xff; enclen = 2; } else if (n >= -32768) { b[0] = 0xd1; /* int 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else if (n >= -2147483648LL) { b[0] = 0xd2; /* int 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } else { b[0] = 0xd3; /* int 64 */ b[1] = (n & 0xff00000000000000LL) >> 56; b[2] = (n & 0xff000000000000LL) >> 48; b[3] = (n & 0xff0000000000LL) >> 40; b[4] = (n & 0xff00000000LL) >> 32; b[5] = (n & 0xff000000) >> 24; b[6] = (n & 0xff0000) >> 16; b[7] = (n & 0xff00) >> 8; b[8] = n & 0xff; enclen = 9; } } mp_buf_append(L,buf,b,enclen); } void mp_encode_array(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; if (n <= 15) { b[0] = 0x90 | (n & 0xf); /* fix array */ enclen = 1; } else if (n <= 65535) { b[0] = 0xdc; /* array 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else { b[0] = 0xdd; /* array 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } mp_buf_append(L,buf,b,enclen); } void mp_encode_map(lua_State *L, mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; if (n <= 15) { b[0] = 0x80 | (n & 0xf); /* fix map */ enclen = 1; } else if (n <= 65535) { b[0] = 0xde; /* map 16 */ b[1] = (n & 0xff00) >> 8; b[2] = n & 0xff; enclen = 3; } else { b[0] = 0xdf; /* map 32 */ b[1] = (n & 0xff000000) >> 24; b[2] = (n & 0xff0000) >> 16; b[3] = (n & 0xff00) >> 8; b[4] = n & 0xff; enclen = 5; } mp_buf_append(L,buf,b,enclen); } /* --------------------------- Lua types encoding --------------------------- */ void mp_encode_lua_string(lua_State *L, mp_buf *buf) { size_t len; const char *s; s = lua_tolstring(L,-1,&len); mp_encode_bytes(L,buf,(const unsigned char*)s,len); } void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { unsigned char b = lua_toboolean(L,-1) ? 
0xc3 : 0xc2; mp_buf_append(L,buf,&b,1); } /* Lua 5.3 has a built in 64-bit integer type */ void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { #if (LUA_VERSION_NUM < 503) && BITS_32 lua_Number i = lua_tonumber(L,-1); #else lua_Integer i = lua_tointeger(L,-1); #endif mp_encode_int(L, buf, (int64_t)i); } /* Lua 5.2 and lower only has 64-bit doubles, so we need to * detect if the double may be representable as an int * for Lua < 5.3 */ void mp_encode_lua_number(lua_State *L, mp_buf *buf) { lua_Number n = lua_tonumber(L,-1); if (IS_INT64_EQUIVALENT(n)) { mp_encode_lua_integer(L, buf); } else { mp_encode_double(L,buf,(double)n); } } void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); /* Convert a lua table into a message pack list. */ void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { #if LUA_VERSION_NUM < 502 size_t len = lua_objlen(L,-1), j; #else size_t len = lua_rawlen(L,-1), j; #endif mp_encode_array(L,buf,len); for (j = 1; j <= len; j++) { lua_pushnumber(L,j); lua_gettable(L,-2); mp_encode_lua_type(L,buf,level+1); } } /* Convert a lua table into a message pack key-value map. */ void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { size_t len = 0; /* First step: count keys into table. No other way to do it with the * Lua API, we need to iterate a first time. Note that an alternative * would be to do a single run, and then hack the buffer to insert the * map opcodes for message pack. Too hackish for this lib. */ lua_pushnil(L); while(lua_next(L,-2)) { lua_pop(L,1); /* remove value, keep key for next iteration. */ len++; } /* Step two: actually encoding of the map. */ mp_encode_map(L,buf,len); lua_pushnil(L); while(lua_next(L,-2)) { /* Stack: ... key value */ lua_pushvalue(L,-2); /* Stack: ... key value key */ mp_encode_lua_type(L,buf,level+1); /* encode key */ mp_encode_lua_type(L,buf,level+1); /* encode val */ } } /* Returns true if the Lua table on top of the stack is exclusively composed * of keys from numerical keys from 1 up to N, with N being the total number * of elements, without any hole in the middle. */ int table_is_an_array(lua_State *L) { int count = 0, max = 0; #if LUA_VERSION_NUM < 503 lua_Number n; #else lua_Integer n; #endif /* Stack top on function entry */ int stacktop; stacktop = lua_gettop(L); lua_pushnil(L); while(lua_next(L,-2)) { /* Stack: ... key value */ lua_pop(L,1); /* Stack: ... key */ /* The <= 0 check is valid here because we're comparing indexes. */ #if LUA_VERSION_NUM < 503 if ((LUA_TNUMBER != lua_type(L,-1)) || (n = lua_tonumber(L, -1)) <= 0 || !IS_INT_EQUIVALENT(n)) #else if (!lua_isinteger(L,-1) || (n = lua_tointeger(L, -1)) <= 0) #endif { lua_settop(L, stacktop); return 0; } max = (n > max ? n : max); count++; } /* We have the total number of elements in "count". Also we have * the max index encountered in "max". We can't reach this code * if there are indexes <= 0. If you also note that there can not be * repeated keys into a table, you have that if max==count you are sure * that there are all the keys form 1 to count (both included). */ lua_settop(L, stacktop); return max == count; } /* If the length operator returns non-zero, that is, there is at least * an object at key '1', we serialize to message pack list. Otherwise * we use a map. 
*/ void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { if (table_is_an_array(L)) mp_encode_lua_table_as_array(L,buf,level); else mp_encode_lua_table_as_map(L,buf,level); } void mp_encode_lua_null(lua_State *L, mp_buf *buf) { unsigned char b[1]; b[0] = 0xc0; mp_buf_append(L,buf,b,1); } void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { int t = lua_type(L,-1); /* Limit the encoding of nested tables to a specified maximum depth, so that * we survive when called against circular references in tables. */ if (t == LUA_TTABLE && level == LUACMSGPACK_MAX_NESTING) t = LUA_TNIL; switch(t) { case LUA_TSTRING: mp_encode_lua_string(L,buf); break; case LUA_TBOOLEAN: mp_encode_lua_bool(L,buf); break; case LUA_TNUMBER: #if LUA_VERSION_NUM < 503 mp_encode_lua_number(L,buf); break; #else if (lua_isinteger(L, -1)) { mp_encode_lua_integer(L, buf); } else { mp_encode_lua_number(L, buf); } break; #endif case LUA_TTABLE: mp_encode_lua_table(L,buf,level); break; default: mp_encode_lua_null(L,buf); break; } lua_pop(L,1); } /* * Packs all arguments as a stream for multiple upacking later. * Returns error if no arguments provided. */ int mp_pack(lua_State *L) { int nargs = lua_gettop(L); int i; mp_buf *buf; if (nargs == 0) return luaL_argerror(L, 0, "MessagePack pack needs input."); if (!lua_checkstack(L, nargs)) return luaL_argerror(L, 0, "Too many arguments for MessagePack pack."); buf = mp_buf_new(L); for(i = 1; i <= nargs; i++) { /* Copy argument i to top of stack for _encode processing; * the encode function pops it from the stack when complete. */ lua_pushvalue(L, i); mp_encode_lua_type(L,buf,0); lua_pushlstring(L,(char*)buf->b,buf->len); /* Reuse the buffer for the next operation by * setting its free count to the total buffer size * and the current position to zero. */ buf->free += buf->len; buf->len = 0; } mp_buf_free(L, buf); /* Concatenate all nargs buffers together */ lua_concat(L, nargs); return 1; } /* ------------------------------- Decoding --------------------------------- */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c); void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) { assert(len <= UINT_MAX); int index = 1; lua_newtable(L); while(len--) { lua_pushnumber(L,index++); mp_decode_to_lua_type(L,c); if (c->err) return; lua_settable(L,-3); } } void mp_decode_to_lua_hash(lua_State *L, mp_cur *c, size_t len) { assert(len <= UINT_MAX); lua_newtable(L); while(len--) { mp_decode_to_lua_type(L,c); /* key */ if (c->err) return; mp_decode_to_lua_type(L,c); /* value */ if (c->err) return; lua_settable(L,-3); } } /* Decode a Message Pack raw object pointed by the string cursor 'c' to * a Lua type, that is left as the only result on the stack. */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { mp_cur_need(c,1); /* If we return more than 18 elements, we must resize the stack to * fit all our return values. 
But, there is no way to * determine how many objects a msgpack will unpack to up front, so * we request a +1 larger stack on each iteration (noop if stack is * big enough, and when stack does require resize it doubles in size) */ luaL_checkstack(L, 1, "too many return values at once; " "use unpack_one or unpack_limit instead."); switch(c->p[0]) { case 0xcc: /* uint 8 */ mp_cur_need(c,2); lua_pushunsigned(L,c->p[1]); mp_cur_consume(c,2); break; case 0xd0: /* int 8 */ mp_cur_need(c,2); lua_pushinteger(L,(signed char)c->p[1]); mp_cur_consume(c,2); break; case 0xcd: /* uint 16 */ mp_cur_need(c,3); lua_pushunsigned(L, (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xd1: /* int 16 */ mp_cur_need(c,3); lua_pushinteger(L,(int16_t) (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xce: /* uint 32 */ mp_cur_need(c,5); lua_pushunsigned(L, ((uint32_t)c->p[1] << 24) | ((uint32_t)c->p[2] << 16) | ((uint32_t)c->p[3] << 8) | (uint32_t)c->p[4]); mp_cur_consume(c,5); break; case 0xd2: /* int 32 */ mp_cur_need(c,5); lua_pushinteger(L, ((int32_t)c->p[1] << 24) | ((int32_t)c->p[2] << 16) | ((int32_t)c->p[3] << 8) | (int32_t)c->p[4]); mp_cur_consume(c,5); break; case 0xcf: /* uint 64 */ mp_cur_need(c,9); lua_pushunsigned(L, ((uint64_t)c->p[1] << 56) | ((uint64_t)c->p[2] << 48) | ((uint64_t)c->p[3] << 40) | ((uint64_t)c->p[4] << 32) | ((uint64_t)c->p[5] << 24) | ((uint64_t)c->p[6] << 16) | ((uint64_t)c->p[7] << 8) | (uint64_t)c->p[8]); mp_cur_consume(c,9); break; case 0xd3: /* int 64 */ mp_cur_need(c,9); #if LUA_VERSION_NUM < 503 lua_pushnumber(L, #else lua_pushinteger(L, #endif ((int64_t)c->p[1] << 56) | ((int64_t)c->p[2] << 48) | ((int64_t)c->p[3] << 40) | ((int64_t)c->p[4] << 32) | ((int64_t)c->p[5] << 24) | ((int64_t)c->p[6] << 16) | ((int64_t)c->p[7] << 8) | (int64_t)c->p[8]); mp_cur_consume(c,9); break; case 0xc0: /* nil */ lua_pushnil(L); mp_cur_consume(c,1); break; case 0xc3: /* true */ lua_pushboolean(L,1); mp_cur_consume(c,1); break; case 0xc2: /* false */ lua_pushboolean(L,0); mp_cur_consume(c,1); break; case 0xca: /* float */ mp_cur_need(c,5); assert(sizeof(float) == 4); { float f; memcpy(&f,c->p+1,4); memrevifle(&f,4); lua_pushnumber(L,f); mp_cur_consume(c,5); } break; case 0xcb: /* double */ mp_cur_need(c,9); assert(sizeof(double) == 8); { double d; memcpy(&d,c->p+1,8); memrevifle(&d,8); lua_pushnumber(L,d); mp_cur_consume(c,9); } break; case 0xd9: /* raw 8 */ mp_cur_need(c,2); { size_t l = c->p[1]; mp_cur_need(c,2+l); lua_pushlstring(L,(char*)c->p+2,l); mp_cur_consume(c,2+l); } break; case 0xda: /* raw 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_need(c,3+l); lua_pushlstring(L,(char*)c->p+3,l); mp_cur_consume(c,3+l); } break; case 0xdb: /* raw 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | ((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_cur_need(c,l); lua_pushlstring(L,(char*)c->p,l); mp_cur_consume(c,l); } break; case 0xdc: /* array 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_consume(c,3); mp_decode_to_lua_array(L,c,l); } break; case 0xdd: /* array 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | ((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_array(L,c,l); } break; case 0xde: /* map 16 */ mp_cur_need(c,3); { size_t l = (c->p[1] << 8) | c->p[2]; mp_cur_consume(c,3); mp_decode_to_lua_hash(L,c,l); } break; case 0xdf: /* map 32 */ mp_cur_need(c,5); { size_t l = ((size_t)c->p[1] << 24) | 
((size_t)c->p[2] << 16) | ((size_t)c->p[3] << 8) | (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_hash(L,c,l); } break; default: /* types that can't be idenitified by first byte value. */ if ((c->p[0] & 0x80) == 0) { /* positive fixnum */ lua_pushunsigned(L,c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xe0) { /* negative fixnum */ lua_pushinteger(L,(signed char)c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xa0) { /* fix raw */ size_t l = c->p[0] & 0x1f; mp_cur_need(c,1+l); lua_pushlstring(L,(char*)c->p+1,l); mp_cur_consume(c,1+l); } else if ((c->p[0] & 0xf0) == 0x90) { /* fix map */ size_t l = c->p[0] & 0xf; mp_cur_consume(c,1); mp_decode_to_lua_array(L,c,l); } else if ((c->p[0] & 0xf0) == 0x80) { /* fix map */ size_t l = c->p[0] & 0xf; mp_cur_consume(c,1); mp_decode_to_lua_hash(L,c,l); } else { c->err = MP_CUR_ERROR_BADFMT; } } } int mp_unpack_full(lua_State *L, int limit, int offset) { size_t len; const char *s; mp_cur c; int cnt; /* Number of objects unpacked */ int decode_all = (!limit && !offset); s = luaL_checklstring(L,1,&len); /* if no match, exits */ if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */ return luaL_error(L, "Invalid request to unpack with offset of %d and limit of %d.", offset, len); else if (offset > len) return luaL_error(L, "Start offset %d greater than input length %d.", offset, len); if (decode_all) limit = INT_MAX; mp_cur_init(&c,(const unsigned char *)s+offset,len-offset); /* We loop over the decode because this could be a stream * of multiple top-level values serialized together */ for(cnt = 0; c.left > 0 && cnt < limit; cnt++) { mp_decode_to_lua_type(L,&c); if (c.err == MP_CUR_ERROR_EOF) { return luaL_error(L,"Missing bytes in input."); } else if (c.err == MP_CUR_ERROR_BADFMT) { return luaL_error(L,"Bad data format in input."); } } if (!decode_all) { /* c->left is the remaining size of the input buffer. * subtract the entire buffer size from the unprocessed size * to get our next start offset */ int offset = len - c.left; /* Return offset -1 when we have have processed the entire buffer. */ lua_pushinteger(L, c.left == 0 ? -1 : offset); /* Results are returned with the arg elements still * in place. Lua takes care of only returning * elements above the args for us. * In this case, we have one arg on the stack * for this function, so we insert our first return * value at position 2. 
*/ lua_insert(L, 2); cnt += 1; /* increase return count by one to make room for offset */ } return cnt; } int mp_unpack(lua_State *L) { return mp_unpack_full(L, 0, 0); } int mp_unpack_one(lua_State *L) { int offset = luaL_optinteger(L, 2, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, 1, offset); } int mp_unpack_limit(lua_State *L) { int limit = luaL_checkinteger(L, 2); int offset = luaL_optinteger(L, 3, 0); /* Variable pop because offset may not exist */ lua_pop(L, lua_gettop(L)-1); return mp_unpack_full(L, limit, offset); } int mp_safe(lua_State *L) { int argc, err, total_results; argc = lua_gettop(L); /* This adds our function to the bottom of the stack * (the "call this function" position) */ lua_pushvalue(L, lua_upvalueindex(1)); lua_insert(L, 1); err = lua_pcall(L, argc, LUA_MULTRET, 0); total_results = lua_gettop(L); if (!err) { return total_results; } else { lua_pushnil(L); lua_insert(L,-2); return 2; } } /* -------------------------------------------------------------------------- */ const struct luaL_Reg cmds[] = { {"pack", mp_pack}, {"unpack", mp_unpack}, {"unpack_one", mp_unpack_one}, {"unpack_limit", mp_unpack_limit}, {0} }; int luaopen_create(lua_State *L) { int i; /* Manually construct our module table instead of * relying on _register or _newlib */ lua_newtable(L); for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { lua_pushcfunction(L, cmds[i].func); lua_setfield(L, -2, cmds[i].name); } /* Add metadata */ lua_pushliteral(L, LUACMSGPACK_NAME); lua_setfield(L, -2, "_NAME"); lua_pushliteral(L, LUACMSGPACK_VERSION); lua_setfield(L, -2, "_VERSION"); lua_pushliteral(L, LUACMSGPACK_COPYRIGHT); lua_setfield(L, -2, "_COPYRIGHT"); lua_pushliteral(L, LUACMSGPACK_DESCRIPTION); lua_setfield(L, -2, "_DESCRIPTION"); return 1; } LUALIB_API int luaopen_cmsgpack(lua_State *L) { luaopen_create(L); #if LUA_VERSION_NUM < 502 /* Register name globally for 5.1 */ lua_pushvalue(L, -1); lua_setglobal(L, LUACMSGPACK_NAME); #endif return 1; } LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) { int i; luaopen_cmsgpack(L); /* Wrap all functions in the safe handler */ for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { lua_getfield(L, -1, cmds[i].name); lua_pushcclosure(L, mp_safe, 1); lua_setfield(L, -2, cmds[i].name); } #if LUA_VERSION_NUM < 502 /* Register name globally for 5.1 */ lua_pushvalue(L, -1); lua_setglobal(L, LUACMSGPACK_SAFE_NAME); #endif return 1; } /****************************************************************************** * Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/
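/*
 * Editorial note: a minimal host-side round trip through the binding
 * above, illustrative only and therefore guarded out with #if 0.  It
 * assumes this file is linked into a host program together with a
 * Lua 5.1+ core; the only name taken from this file is the
 * luaopen_cmsgpack() entry point.  pack() turns Lua values into a
 * MessagePack byte string and unpack() reverses it, so the assert in
 * the embedded chunk checks the round trip end to end.
 */
#if 0
#include <stdio.h>

int main(void)
{
    lua_State *L = luaL_newstate();

    luaL_openlibs(L);
    luaopen_cmsgpack(L);   /* leaves the module table on the stack */
    lua_setglobal(L, "cmsgpack");

    /* The table mixes array and hash parts, so table_is_an_array()
     * above returns false and it is encoded as a MessagePack map. */
    if (luaL_dostring(L,
            "local blob = cmsgpack.pack({1, 2, three = 'x'})\n"
            "local t = cmsgpack.unpack(blob)\n"
            "assert(t[1] == 1 and t[2] == 2 and t.three == 'x')\n"))
        fprintf(stderr, "%s\n", lua_tostring(L, -1));

    lua_close(L);
    return 0;
}
#endif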
./CrossVul/dataset_final_sorted/CWE-119/c/bad_129_0
crossvul-cpp_data_good_2912_0
/* * Released under the GPLv2 only. * SPDX-License-Identifier: GPL-2.0 */ #include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/usb/hcd.h> #include <linux/usb/quirks.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/device.h> #include <asm/byteorder.h> #include "usb.h" #define USB_MAXALTSETTING 128 /* Hard limit */ #define USB_MAXCONFIG 8 /* Arbitrary limit */ static inline const char *plural(int n) { return (n == 1 ? "" : "s"); } static int find_next_descriptor(unsigned char *buffer, int size, int dt1, int dt2, int *num_skipped) { struct usb_descriptor_header *h; int n = 0; unsigned char *buffer0 = buffer; /* Find the next descriptor of type dt1 or dt2 */ while (size > 0) { h = (struct usb_descriptor_header *) buffer; if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2) break; buffer += h->bLength; size -= h->bLength; ++n; } /* Store the number of descriptors skipped and return the * number of bytes skipped */ if (num_skipped) *num_skipped = n; return buffer - buffer0; } static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) { struct usb_ssp_isoc_ep_comp_descriptor *desc; /* * The SuperSpeedPlus Isoc endpoint companion descriptor immediately * follows the SuperSpeed Endpoint Companion descriptor */ desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP || size < USB_DT_SSP_ISOC_EP_COMP_SIZE) { dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion" "for config %d interface %d altsetting %d ep %d.\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); return; } memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE); } static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) { struct usb_ss_ep_comp_descriptor *desc; int max_tx; /* The SuperSpeed endpoint companion descriptor is supposed to * be the first thing immediately following the endpoint descriptor. */ desc = (struct usb_ss_ep_comp_descriptor *) buffer; if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || size < USB_DT_SS_EP_COMP_SIZE) { dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " " interface %d altsetting %d ep %d: " "using minimum values\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); /* Fill in some default values. * Leave bmAttributes as zero, which will mean no streams for * bulk, and isoc won't support multiple bursts of packets. * With bursts of only one packet, and a Mult of 1, the max * amount of data moved per endpoint service interval is one * packet. 
*/ ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE; ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; if (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) ep->ss_ep_comp.wBytesPerInterval = ep->desc.wMaxPacketSize; return; } buffer += desc->bLength; size -= desc->bLength; memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); /* Check the various values */ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { dev_warn(ddev, "Control endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 0; } else if (desc->bMaxBurst > 15) { dev_warn(ddev, "Endpoint with bMaxBurst = %d in " "config %d interface %d altsetting %d ep %d: " "setting to 15\n", desc->bMaxBurst, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bMaxBurst = 15; } if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc)) && desc->bmAttributes != 0) { dev_warn(ddev, "%s endpoint with bmAttributes = %d in " "config %d interface %d altsetting %d ep %d: " "setting to zero\n", usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk", desc->bmAttributes, cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 0; } else if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) { dev_warn(ddev, "Bulk endpoint with more than 65536 streams in " "config %d interface %d altsetting %d ep %d: " "setting to max\n", cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 16; } else if (usb_endpoint_xfer_isoc(&ep->desc) && !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) && USB_SS_MULT(desc->bmAttributes) > 3) { dev_warn(ddev, "Isoc endpoint has Mult of %d in " "config %d interface %d altsetting %d ep %d: " "setting to 3\n", USB_SS_MULT(desc->bmAttributes), cfgno, inum, asnum, ep->desc.bEndpointAddress); ep->ss_ep_comp.bmAttributes = 2; } if (usb_endpoint_xfer_isoc(&ep->desc)) max_tx = (desc->bMaxBurst + 1) * (USB_SS_MULT(desc->bmAttributes)) * usb_endpoint_maxp(&ep->desc); else if (usb_endpoint_xfer_int(&ep->desc)) max_tx = usb_endpoint_maxp(&ep->desc) * (desc->bMaxBurst + 1); else max_tx = 999999; if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in " "config %d interface %d altsetting %d ep %d: " "setting to %d\n", usb_endpoint_xfer_isoc(&ep->desc) ? 
"Isoc" : "Int", le16_to_cpu(desc->wBytesPerInterval), cfgno, inum, asnum, ep->desc.bEndpointAddress, max_tx); ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); } /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */ if (usb_endpoint_xfer_isoc(&ep->desc) && USB_SS_SSP_ISOC_COMP(desc->bmAttributes)) usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum, ep, buffer, size); } static const unsigned short low_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 8, [USB_ENDPOINT_XFER_ISOC] = 0, [USB_ENDPOINT_XFER_BULK] = 0, [USB_ENDPOINT_XFER_INT] = 8, }; static const unsigned short full_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1023, [USB_ENDPOINT_XFER_BULK] = 64, [USB_ENDPOINT_XFER_INT] = 64, }; static const unsigned short high_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1024, [USB_ENDPOINT_XFER_BULK] = 512, [USB_ENDPOINT_XFER_INT] = 1024, }; static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 512, [USB_ENDPOINT_XFER_ISOC] = 1024, [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; unsigned int maxp; const unsigned short *maxpacket_maxes; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE) n = USB_DT_ENDPOINT_AUDIO_SIZE; else if (d->bLength >= USB_DT_ENDPOINT_SIZE) n = USB_DT_ENDPOINT_SIZE; else { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint descriptor of length %d, skipping\n", cfgno, inum, asnum, d->bLength); goto skip_to_next_endpoint_or_interface_descriptor; } i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK; if (i >= 16 || i == 0) { dev_warn(ddev, "config %d interface %d altsetting %d has an " "invalid endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } /* Only store as many endpoints as we have room for */ if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; /* Check for duplicate endpoint addresses */ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { if (ifp->endpoint[i].desc.bEndpointAddress == d->bEndpointAddress) { dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; } } endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ifp->desc.bNumEndpoints; memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* * Fix up bInterval values outside the legal range. * Use 10 or 8 ms if no proper value can be guessed. */ i = 0; /* i = min, j = max, n = default */ j = 255; if (usb_endpoint_xfer_int(d)) { i = 1; switch (to_usb_device(ddev)->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: case USB_SPEED_HIGH: /* * Many device manufacturers are using full-speed * bInterval values in high-speed interrupt endpoint * descriptors. Try to fix those and fall back to an * 8-ms default value otherwise. */ n = fls(d->bInterval*8); if (n == 0) n = 7; /* 8 ms = 2^(7-1) uframes */ j = 16; /* * Adjust bInterval for quirked devices. 
*/ /* * This quirk fixes bIntervals reported in ms. */ if (to_usb_device(ddev)->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval) + 3, i, j); i = j = n; } /* * This quirk fixes bIntervals reported in * linear microframes. */ if (to_usb_device(ddev)->quirks & USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) { n = clamp(fls(d->bInterval), i, j); i = j = n; } break; default: /* USB_SPEED_FULL or _LOW */ /* * For low-speed, 10 ms is the official minimum. * But some "overclocked" devices might want faster * polling so we'll allow it. */ n = 10; break; } } else if (usb_endpoint_xfer_isoc(d)) { i = 1; j = 16; switch (to_usb_device(ddev)->speed) { case USB_SPEED_HIGH: n = 7; /* 8 ms = 2^(7-1) uframes */ break; default: /* USB_SPEED_FULL */ n = 4; /* 8 ms = 2^(4-1) frames */ break; } } if (d->bInterval < i || d->bInterval > j) { dev_warn(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X has an invalid bInterval %d, " "changing to %d\n", cfgno, inum, asnum, d->bEndpointAddress, d->bInterval, n); endpoint->desc.bInterval = n; } /* Some buggy low-speed devices have Bulk endpoints, which is * explicitly forbidden by the USB spec. In an attempt to make * them usable, we will try treating them as Interrupt endpoints. */ if (to_usb_device(ddev)->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) { dev_warn(ddev, "config %d interface %d altsetting %d " "endpoint 0x%X is Bulk; changing to Interrupt\n", cfgno, inum, asnum, d->bEndpointAddress); endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; endpoint->desc.bInterval = 1; if (usb_endpoint_maxp(&endpoint->desc) > 8) endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } /* Validate the wMaxPacketSize field */ maxp = usb_endpoint_maxp(&endpoint->desc); /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ switch (to_usb_device(ddev)->speed) { case USB_SPEED_LOW: maxpacket_maxes = low_speed_maxpacket_maxes; break; case USB_SPEED_FULL: maxpacket_maxes = full_speed_maxpacket_maxes; break; case USB_SPEED_HIGH: /* Bits 12..11 are allowed only for HS periodic endpoints */ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { i = maxp & (BIT(12) | BIT(11)); maxp &= ~i; } /* fallthrough */ default: maxpacket_maxes = high_speed_maxpacket_maxes; break; case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: maxpacket_maxes = super_speed_maxpacket_maxes; break; } j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; if (maxp > j) { dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp, j); maxp = j; endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); } /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. High speed HCDs may not * be able to handle that particular bug, so let's warn... 
*/ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", cfgno, inum, asnum, d->bEndpointAddress, maxp); } /* Parse a possible SuperSpeed endpoint companion descriptor */ if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER) usb_parse_ss_endpoint_companion(ddev, cfgno, inum, asnum, endpoint, buffer, size); /* Skip over any Class Specific or Vendor Specific descriptors; * find the next endpoint or interface descriptor */ endpoint->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); endpoint->extralen = i; retval = buffer - buffer0 + i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "endpoint"); return retval; skip_to_next_endpoint_or_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } void usb_release_interface_cache(struct kref *ref) { struct usb_interface_cache *intfc = ref_to_usb_interface_cache(ref); int j; for (j = 0; j < intfc->num_altsetting; j++) { struct usb_host_interface *alt = &intfc->altsetting[j]; kfree(alt->endpoint); kfree(alt->string); } kfree(intfc); } static int usb_parse_interface(struct device *ddev, int cfgno, struct usb_host_config *config, unsigned char *buffer, int size, u8 inums[], u8 nalts[]) { unsigned char *buffer0 = buffer; struct usb_interface_descriptor *d; int inum, asnum; struct usb_interface_cache *intfc; struct usb_host_interface *alt; int i, n; int len, retval; int num_ep, num_ep_orig; d = (struct usb_interface_descriptor *) buffer; buffer += d->bLength; size -= d->bLength; if (d->bLength < USB_DT_INTERFACE_SIZE) goto skip_to_next_interface_descriptor; /* Which interface entry is this? */ intfc = NULL; inum = d->bInterfaceNumber; for (i = 0; i < config->desc.bNumInterfaces; ++i) { if (inums[i] == inum) { intfc = config->intf_cache[i]; break; } } if (!intfc || intfc->num_altsetting >= nalts[i]) goto skip_to_next_interface_descriptor; /* Check for duplicate altsetting entries */ asnum = d->bAlternateSetting; for ((i = 0, alt = &intfc->altsetting[0]); i < intfc->num_altsetting; (++i, ++alt)) { if (alt->desc.bAlternateSetting == asnum) { dev_warn(ddev, "Duplicate descriptor for config %d " "interface %d altsetting %d, skipping\n", cfgno, inum, asnum); goto skip_to_next_interface_descriptor; } } ++intfc->num_altsetting; memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE); /* Skip over any Class Specific or Vendor Specific descriptors; * find the first endpoint or interface descriptor */ alt->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, USB_DT_INTERFACE, &n); alt->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "interface"); buffer += i; size -= i; /* Allocate space for the right(?) 
number of endpoints */ num_ep = num_ep_orig = alt->desc.bNumEndpoints; alt->desc.bNumEndpoints = 0; /* Use as a counter */ if (num_ep > USB_MAXENDPOINTS) { dev_warn(ddev, "too many endpoints for config %d interface %d " "altsetting %d: %d, using maximum allowed: %d\n", cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS); num_ep = USB_MAXENDPOINTS; } if (num_ep > 0) { /* Can't allocate 0 bytes */ len = sizeof(struct usb_host_endpoint) * num_ep; alt->endpoint = kzalloc(len, GFP_KERNEL); if (!alt->endpoint) return -ENOMEM; } /* Parse all the endpoint descriptors */ n = 0; while (size > 0) { if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; buffer += retval; size -= retval; } if (n != num_ep_orig) dev_warn(ddev, "config %d interface %d altsetting %d has %d " "endpoint descriptor%s, different from the interface " "descriptor's value: %d\n", cfgno, inum, asnum, n, plural(n), num_ep_orig); return buffer - buffer0; skip_to_next_interface_descriptor: i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, NULL); return buffer - buffer0 + i; } static int usb_parse_configuration(struct usb_device *dev, int cfgidx, struct usb_host_config *config, unsigned char *buffer, int size) { struct device *ddev = &dev->dev; unsigned char *buffer0 = buffer; int cfgno; int nintf, nintf_orig; int i, j, n; struct usb_interface_cache *intfc; unsigned char *buffer2; int size2; struct usb_descriptor_header *header; int len, retval; u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES]; unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { dev_err(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); return -EINVAL; } cfgno = config->desc.bConfigurationValue; buffer += config->desc.bLength; size -= config->desc.bLength; nintf = nintf_orig = config->desc.bNumInterfaces; if (nintf > USB_MAXINTERFACES) { dev_warn(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", cfgno, nintf, USB_MAXINTERFACES); nintf = USB_MAXINTERFACES; } /* Go through the descriptors, checking their length and counting the * number of altsettings for each interface */ n = 0; for ((buffer2 = buffer, size2 = size); size2 > 0; (buffer2 += header->bLength, size2 -= header->bLength)) { if (size2 < sizeof(struct usb_descriptor_header)) { dev_warn(ddev, "config %d descriptor has %d excess " "byte%s, ignoring\n", cfgno, size2, plural(size2)); break; } header = (struct usb_descriptor_header *) buffer2; if ((header->bLength > size2) || (header->bLength < 2)) { dev_warn(ddev, "config %d has an invalid descriptor " "of length %d, skipping remainder of the config\n", cfgno, header->bLength); break; } if (header->bDescriptorType == USB_DT_INTERFACE) { struct usb_interface_descriptor *d; int inum; d = (struct usb_interface_descriptor *) header; if (d->bLength < USB_DT_INTERFACE_SIZE) { dev_warn(ddev, "config %d has an invalid " "interface descriptor of length %d, " "skipping\n", cfgno, d->bLength); continue; } inum = d->bInterfaceNumber; if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) && n >= nintf_orig) { dev_warn(ddev, "config %d has more interface " "descriptors, than it declares in " "bNumInterfaces, ignoring interface " "number: 
%d\n", cfgno, inum); continue; } if (inum >= nintf_orig) dev_warn(ddev, "config %d has an invalid " "interface number: %d but max is %d\n", cfgno, inum, nintf_orig - 1); /* Have we already encountered this interface? * Count its altsettings */ for (i = 0; i < n; ++i) { if (inums[i] == inum) break; } if (i < n) { if (nalts[i] < 255) ++nalts[i]; } else if (n < USB_MAXINTERFACES) { inums[n] = inum; nalts[n] = 1; ++n; } } else if (header->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { struct usb_interface_assoc_descriptor *d; d = (struct usb_interface_assoc_descriptor *)header; if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { dev_warn(ddev, "config %d has an invalid interface association descriptor of length %d, skipping\n", cfgno, d->bLength); continue; } if (iad_num == USB_MAXIADS) { dev_warn(ddev, "found more Interface " "Association Descriptors " "than allocated for in " "configuration %d\n", cfgno); } else { config->intf_assoc[iad_num] = d; iad_num++; } } else if (header->bDescriptorType == USB_DT_DEVICE || header->bDescriptorType == USB_DT_CONFIG) dev_warn(ddev, "config %d contains an unexpected " "descriptor of type 0x%X, skipping\n", cfgno, header->bDescriptorType); } /* for ((buffer2 = buffer, size2 = size); ...) */ size = buffer2 - buffer; config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); if (n != nintf) dev_warn(ddev, "config %d has %d interface%s, different from " "the descriptor's value: %d\n", cfgno, n, plural(n), nintf_orig); else if (n == 0) dev_warn(ddev, "config %d has no interfaces?\n", cfgno); config->desc.bNumInterfaces = nintf = n; /* Check for missing interface numbers */ for (i = 0; i < nintf; ++i) { for (j = 0; j < nintf; ++j) { if (inums[j] == i) break; } if (j >= nintf) dev_warn(ddev, "config %d has no interface number " "%d\n", cfgno, i); } /* Allocate the usb_interface_caches and altsetting arrays */ for (i = 0; i < nintf; ++i) { j = nalts[i]; if (j > USB_MAXALTSETTING) { dev_warn(ddev, "too many alternate settings for " "config %d interface %d: %d, " "using maximum allowed: %d\n", cfgno, inums[i], j, USB_MAXALTSETTING); nalts[i] = j = USB_MAXALTSETTING; } len = sizeof(*intfc) + sizeof(struct usb_host_interface) * j; config->intf_cache[i] = intfc = kzalloc(len, GFP_KERNEL); if (!intfc) return -ENOMEM; kref_init(&intfc->ref); } /* FIXME: parse the BOS descriptor */ /* Skip over any Class Specific or Vendor Specific descriptors; * find the first interface descriptor */ config->extra = buffer; i = find_next_descriptor(buffer, size, USB_DT_INTERFACE, USB_DT_INTERFACE, &n); config->extralen = i; if (n > 0) dev_dbg(ddev, "skipped %d descriptor%s after %s\n", n, plural(n), "configuration"); buffer += i; size -= i; /* Parse all the interface/altsetting descriptors */ while (size > 0) { retval = usb_parse_interface(ddev, cfgno, config, buffer, size, inums, nalts); if (retval < 0) return retval; buffer += retval; size -= retval; } /* Check for missing altsettings */ for (i = 0; i < nintf; ++i) { intfc = config->intf_cache[i]; for (j = 0; j < intfc->num_altsetting; ++j) { for (n = 0; n < intfc->num_altsetting; ++n) { if (intfc->altsetting[n].desc. bAlternateSetting == j) break; } if (n >= intfc->num_altsetting) dev_warn(ddev, "config %d interface %d has no " "altsetting %d\n", cfgno, inums[i], j); } } return 0; } /* hub-only!! ... and only exported for reset/reinit path. 
* otherwise used internally on disconnect/destroy path */ void usb_destroy_configuration(struct usb_device *dev) { int c, i; if (!dev->config) return; if (dev->rawdescriptors) { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) kfree(dev->rawdescriptors[i]); kfree(dev->rawdescriptors); dev->rawdescriptors = NULL; } for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { struct usb_host_config *cf = &dev->config[c]; kfree(cf->string); for (i = 0; i < cf->desc.bNumInterfaces; i++) { if (cf->intf_cache[i]) kref_put(&cf->intf_cache[i]->ref, usb_release_interface_cache); } } kfree(dev->config); dev->config = NULL; } /* * Get the USB config descriptors, cache and parse'em * * hub-only!! ... and only in reset path, or usb_new_device() * (used by real hubs and virtual root hubs) */ int usb_get_configuration(struct usb_device *dev) { struct device *ddev = &dev->dev; int ncfg = dev->descriptor.bNumConfigurations; int result = 0; unsigned int cfgno, length; unsigned char *bigbuffer; struct usb_config_descriptor *desc; cfgno = 0; result = -ENOMEM; if (ncfg > USB_MAXCONFIG) { dev_warn(ddev, "too many configurations: %d, " "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG); dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; } if (ncfg < 1) { dev_err(ddev, "no configurations\n"); return -EINVAL; } length = ncfg * sizeof(struct usb_host_config); dev->config = kzalloc(length, GFP_KERNEL); if (!dev->config) goto err2; length = ncfg * sizeof(char *); dev->rawdescriptors = kzalloc(length, GFP_KERNEL); if (!dev->rawdescriptors) goto err2; desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL); if (!desc) goto err2; result = 0; for (; cfgno < ncfg; cfgno++) { /* We grab just the first descriptor so we know how long * the whole configuration is */ result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, desc, USB_DT_CONFIG_SIZE); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s: %d\n", cfgno, "start", result); if (result != -EPIPE) goto err; dev_err(ddev, "chopping to %d config(s)\n", cfgno); dev->descriptor.bNumConfigurations = cfgno; break; } else if (result < 4) { dev_err(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, USB_DT_CONFIG_SIZE, result); result = -EINVAL; goto err; } length = max((int) le16_to_cpu(desc->wTotalLength), USB_DT_CONFIG_SIZE); /* Now that we know the length, get the whole thing */ bigbuffer = kmalloc(length, GFP_KERNEL); if (!bigbuffer) { result = -ENOMEM; goto err; } if (dev->quirks & USB_QUIRK_DELAY_INIT) msleep(200); result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); if (result < 0) { dev_err(ddev, "unable to read config index %d " "descriptor/%s\n", cfgno, "all"); kfree(bigbuffer); goto err; } if (result < length) { dev_warn(ddev, "config index %d descriptor too short " "(expected %i, got %i)\n", cfgno, length, result); length = result; } dev->rawdescriptors[cfgno] = bigbuffer; result = usb_parse_configuration(dev, cfgno, &dev->config[cfgno], bigbuffer, length); if (result < 0) { ++cfgno; goto err; } } result = 0; err: kfree(desc); dev->descriptor.bNumConfigurations = cfgno; err2: if (result == -ENOMEM) dev_err(ddev, "out of memory\n"); return result; } void usb_release_bos_descriptor(struct usb_device *dev) { if (dev->bos) { kfree(dev->bos->desc); kfree(dev->bos); dev->bos = NULL; } } /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; unsigned char *buffer; int 
length, total_len, num, i; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); if (!bos) return -ENOMEM; /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); if (ret < USB_DT_BOS_SIZE) { dev_err(ddev, "unable to get BOS descriptor\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); return ret; } length = bos->bLength; total_len = le16_to_cpu(bos->wTotalLength); num = bos->bNumDeviceCaps; kfree(bos); if (total_len < length) return -EINVAL; dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL); if (!dev->bos) return -ENOMEM; /* Now let's get the whole BOS descriptor set */ buffer = kzalloc(total_len, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto err; } dev->bos->desc = (struct usb_bos_descriptor *)buffer; ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len); if (ret < total_len) { dev_err(ddev, "unable to get BOS descriptor set\n"); if (ret >= 0) ret = -ENOMSG; goto err; } total_len -= length; for (i = 0; i < num; i++) { buffer += length; cap = (struct usb_dev_cap_header *)buffer; length = cap->bLength; if (total_len < length) break; total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_warn(ddev, "descriptor type invalid, skip\n"); continue; } switch (cap->bDevCapabilityType) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB cap descriptor is handled by wusb */ break; case USB_CAP_TYPE_EXT: dev->bos->ext_cap = (struct usb_ext_cap_descriptor *)buffer; break; case USB_SS_CAP_TYPE: dev->bos->ss_cap = (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: dev->bos->ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = (struct usb_ss_container_id_descriptor *)buffer; break; case USB_PTM_CAP_TYPE: dev->bos->ptm_cap = (struct usb_ptm_cap_descriptor *)buffer; default: break; } } return 0; err: usb_release_bos_descriptor(dev); return ret; }
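/*
 * Editorial note: the descriptor walks above all rest on one
 * invariant: never trust bLength or wTotalLength further than the
 * bytes actually in hand.  The stand-alone sketch below is
 * illustrative only (guarded out with #if 0; plain user-space C, not
 * kernel code, with hypothetical names walk_descriptors and
 * desc_header).  It applies the same two stop conditions that
 * usb_parse_configuration() checks: fewer bytes left than a
 * descriptor header, or a bLength that is < 2 or overruns the
 * remaining buffer.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct desc_header {		/* mirrors struct usb_descriptor_header */
	unsigned char bLength;
	unsigned char bDescriptorType;
};

static void walk_descriptors(const unsigned char *buf, size_t len)
{
	while (len >= sizeof(struct desc_header)) {
		const struct desc_header *h = (const void *)buf;

		/* Same sanity check as the config parser above. */
		if (h->bLength < 2 || h->bLength > len) {
			printf("bad bLength %u with %zu bytes left\n",
			       h->bLength, len);
			return;
		}
		printf("type 0x%02x, length %u\n",
		       h->bDescriptorType, h->bLength);
		buf += h->bLength;
		len -= h->bLength;
	}
	if (len)
		printf("%zu trailing byte(s) ignored\n", len);
}

int main(void)
{
	/* A tiny hand-built blob: 9-byte config descriptor with
	 * wTotalLength = 18, followed by a 9-byte interface descriptor. */
	static const unsigned char blob[] = {
		9, 0x02, 18, 0, 1, 1, 0, 0x80, 50,	/* USB_DT_CONFIG */
		9, 0x04, 0, 0, 0, 0xff, 0, 0, 0,	/* USB_DT_INTERFACE */
	};

	walk_descriptors(blob, sizeof(blob));
	return 0;
}
#endif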
./CrossVul/dataset_final_sorted/CWE-119/c/good_2912_0
crossvul-cpp_data_bad_5035_0
/*- * Copyright (c) 2003-2007 Tim Kientzle * Copyright (c) 2011 Andres Mejia * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" #ifdef HAVE_ERRNO_H #include <errno.h> #endif #include <time.h> #include <limits.h> #ifdef HAVE_ZLIB_H #include <zlib.h> /* crc32 */ #endif #include "archive.h" #ifndef HAVE_ZLIB_H #include "archive_crc32.h" #endif #include "archive_endian.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_ppmd7_private.h" #include "archive_private.h" #include "archive_read_private.h" /* RAR signature, also known as the mark header */ #define RAR_SIGNATURE "\x52\x61\x72\x21\x1A\x07\x00" /* Header types */ #define MARK_HEAD 0x72 #define MAIN_HEAD 0x73 #define FILE_HEAD 0x74 #define COMM_HEAD 0x75 #define AV_HEAD 0x76 #define SUB_HEAD 0x77 #define PROTECT_HEAD 0x78 #define SIGN_HEAD 0x79 #define NEWSUB_HEAD 0x7a #define ENDARC_HEAD 0x7b /* Main Header Flags */ #define MHD_VOLUME 0x0001 #define MHD_COMMENT 0x0002 #define MHD_LOCK 0x0004 #define MHD_SOLID 0x0008 #define MHD_NEWNUMBERING 0x0010 #define MHD_AV 0x0020 #define MHD_PROTECT 0x0040 #define MHD_PASSWORD 0x0080 #define MHD_FIRSTVOLUME 0x0100 #define MHD_ENCRYPTVER 0x0200 /* Flags common to all headers */ #define HD_MARKDELETION 0x4000 #define HD_ADD_SIZE_PRESENT 0x8000 /* File Header Flags */ #define FHD_SPLIT_BEFORE 0x0001 #define FHD_SPLIT_AFTER 0x0002 #define FHD_PASSWORD 0x0004 #define FHD_COMMENT 0x0008 #define FHD_SOLID 0x0010 #define FHD_LARGE 0x0100 #define FHD_UNICODE 0x0200 #define FHD_SALT 0x0400 #define FHD_VERSION 0x0800 #define FHD_EXTTIME 0x1000 #define FHD_EXTFLAGS 0x2000 /* File dictionary sizes */ #define DICTIONARY_SIZE_64 0x00 #define DICTIONARY_SIZE_128 0x20 #define DICTIONARY_SIZE_256 0x40 #define DICTIONARY_SIZE_512 0x60 #define DICTIONARY_SIZE_1024 0x80 #define DICTIONARY_SIZE_2048 0xA0 #define DICTIONARY_SIZE_4096 0xC0 #define FILE_IS_DIRECTORY 0xE0 #define DICTIONARY_MASK FILE_IS_DIRECTORY /* OS Flags */ #define OS_MSDOS 0 #define OS_OS2 1 #define OS_WIN32 2 #define OS_UNIX 3 #define OS_MAC_OS 4 #define OS_BEOS 5 /* Compression Methods */ #define COMPRESS_METHOD_STORE 0x30 /* LZSS */ #define COMPRESS_METHOD_FASTEST 0x31 #define COMPRESS_METHOD_FAST 0x32 #define COMPRESS_METHOD_NORMAL 0x33 /* PPMd Variant H */ #define COMPRESS_METHOD_GOOD 0x34 #define 
COMPRESS_METHOD_BEST 0x35 #define CRC_POLYNOMIAL 0xEDB88320 #define NS_UNIT 10000000 #define DICTIONARY_MAX_SIZE 0x400000 #define MAINCODE_SIZE 299 #define OFFSETCODE_SIZE 60 #define LOWOFFSETCODE_SIZE 17 #define LENGTHCODE_SIZE 28 #define HUFFMAN_TABLE_SIZE \ MAINCODE_SIZE + OFFSETCODE_SIZE + LOWOFFSETCODE_SIZE + LENGTHCODE_SIZE #define MAX_SYMBOL_LENGTH 0xF #define MAX_SYMBOLS 20 /* * Considering L1,L2 cache miss and a calling of write system-call, * the best size of the output buffer(uncompressed buffer) is 128K. * If the structure of extracting process is changed, this value * might be researched again. */ #define UNP_BUFFER_SIZE (128 * 1024) /* Define this here for non-Windows platforms */ #if !((defined(__WIN32__) || defined(_WIN32) || defined(__WIN32)) && !defined(__CYGWIN__)) #define FILE_ATTRIBUTE_DIRECTORY 0x10 #endif /* Fields common to all headers */ struct rar_header { char crc[2]; char type; char flags[2]; char size[2]; }; /* Fields common to all file headers */ struct rar_file_header { char pack_size[4]; char unp_size[4]; char host_os; char file_crc[4]; char file_time[4]; char unp_ver; char method; char name_size[2]; char file_attr[4]; }; struct huffman_tree_node { int branches[2]; }; struct huffman_table_entry { unsigned int length; int value; }; struct huffman_code { struct huffman_tree_node *tree; int numentries; int numallocatedentries; int minlength; int maxlength; int tablesize; struct huffman_table_entry *table; }; struct lzss { unsigned char *window; int mask; int64_t position; }; struct data_block_offsets { int64_t header_size; int64_t start_offset; int64_t end_offset; }; struct rar { /* Entries from main RAR header */ unsigned main_flags; unsigned long file_crc; char reserved1[2]; char reserved2[4]; char encryptver; /* File header entries */ char compression_method; unsigned file_flags; int64_t packed_size; int64_t unp_size; time_t mtime; long mnsec; mode_t mode; char *filename; char *filename_save; size_t filename_save_size; size_t filename_allocated; /* File header optional entries */ char salt[8]; time_t atime; long ansec; time_t ctime; long cnsec; time_t arctime; long arcnsec; /* Fields to help with tracking decompression of files. */ int64_t bytes_unconsumed; int64_t bytes_remaining; int64_t bytes_uncopied; int64_t offset; int64_t offset_outgoing; int64_t offset_seek; char valid; unsigned int unp_offset; unsigned int unp_buffer_size; unsigned char *unp_buffer; unsigned int dictionary_size; char start_new_block; char entry_eof; unsigned long crc_calculated; int found_first_header; char has_endarc_header; struct data_block_offsets *dbo; unsigned int cursor; unsigned int nodes; /* LZSS members */ struct huffman_code maincode; struct huffman_code offsetcode; struct huffman_code lowoffsetcode; struct huffman_code lengthcode; unsigned char lengthtable[HUFFMAN_TABLE_SIZE]; struct lzss lzss; char output_last_match; unsigned int lastlength; unsigned int lastoffset; unsigned int oldoffset[4]; unsigned int lastlowoffset; unsigned int numlowoffsetrepeats; int64_t filterstart; char start_new_table; /* PPMd Variant H members */ char ppmd_valid; char ppmd_eod; char is_ppmd_block; int ppmd_escape; CPpmd7 ppmd7_context; CPpmd7z_RangeDec range_dec; IByteIn bytein; /* * String conversion object. */ int init_default_conversion; struct archive_string_conv *sconv_default; struct archive_string_conv *opt_sconv; struct archive_string_conv *sconv_utf8; struct archive_string_conv *sconv_utf16be; /* * Bit stream reader. 
*/ struct rar_br { #define CACHE_TYPE uint64_t #define CACHE_BITS (8 * sizeof(CACHE_TYPE)) /* Cache buffer. */ CACHE_TYPE cache_buffer; /* Indicates how many bits avail in cache_buffer. */ int cache_avail; ssize_t avail_in; const unsigned char *next_in; } br; /* * Custom field to denote that this archive contains encrypted entries */ int has_encrypted_entries; }; static int archive_read_support_format_rar_capabilities(struct archive_read *); static int archive_read_format_rar_has_encrypted_entries(struct archive_read *); static int archive_read_format_rar_bid(struct archive_read *, int); static int archive_read_format_rar_options(struct archive_read *, const char *, const char *); static int archive_read_format_rar_read_header(struct archive_read *, struct archive_entry *); static int archive_read_format_rar_read_data(struct archive_read *, const void **, size_t *, int64_t *); static int archive_read_format_rar_read_data_skip(struct archive_read *a); static int64_t archive_read_format_rar_seek_data(struct archive_read *, int64_t, int); static int archive_read_format_rar_cleanup(struct archive_read *); /* Support functions */ static int read_header(struct archive_read *, struct archive_entry *, char); static time_t get_time(int); static int read_exttime(const char *, struct rar *, const char *); static int read_symlink_stored(struct archive_read *, struct archive_entry *, struct archive_string_conv *); static int read_data_stored(struct archive_read *, const void **, size_t *, int64_t *); static int read_data_compressed(struct archive_read *, const void **, size_t *, int64_t *); static int rar_br_preparation(struct archive_read *, struct rar_br *); static int parse_codes(struct archive_read *); static void free_codes(struct archive_read *); static int read_next_symbol(struct archive_read *, struct huffman_code *); static int create_code(struct archive_read *, struct huffman_code *, unsigned char *, int, char); static int add_value(struct archive_read *, struct huffman_code *, int, int, int); static int new_node(struct huffman_code *); static int make_table(struct archive_read *, struct huffman_code *); static int make_table_recurse(struct archive_read *, struct huffman_code *, int, struct huffman_table_entry *, int, int); static int64_t expand(struct archive_read *, int64_t); static int copy_from_lzss_window(struct archive_read *, const void **, int64_t, int); static const void *rar_read_ahead(struct archive_read *, size_t, ssize_t *); /* * Bit stream reader. */ /* Check that the cache buffer has enough bits. */ #define rar_br_has(br, n) ((br)->cache_avail >= n) /* Get compressed data by bit. */ #define rar_br_bits(br, n) \ (((uint32_t)((br)->cache_buffer >> \ ((br)->cache_avail - (n)))) & cache_masks[n]) #define rar_br_bits_forced(br, n) \ (((uint32_t)((br)->cache_buffer << \ ((n) - (br)->cache_avail))) & cache_masks[n]) /* Read ahead to make sure the cache buffer has enough compressed data we * will use. * True : completed, there is enough data in the cache buffer. * False : there is no data in the stream. */ #define rar_br_read_ahead(a, br, n) \ ((rar_br_has(br, (n)) || rar_br_fillup(a, br)) || rar_br_has(br, (n))) /* Notify how many bits we consumed. 
*/ #define rar_br_consume(br, n) ((br)->cache_avail -= (n)) #define rar_br_consume_unalined_bits(br) ((br)->cache_avail &= ~7) static const uint32_t cache_masks[] = { 0x00000000, 0x00000001, 0x00000003, 0x00000007, 0x0000000F, 0x0000001F, 0x0000003F, 0x0000007F, 0x000000FF, 0x000001FF, 0x000003FF, 0x000007FF, 0x00000FFF, 0x00001FFF, 0x00003FFF, 0x00007FFF, 0x0000FFFF, 0x0001FFFF, 0x0003FFFF, 0x0007FFFF, 0x000FFFFF, 0x001FFFFF, 0x003FFFFF, 0x007FFFFF, 0x00FFFFFF, 0x01FFFFFF, 0x03FFFFFF, 0x07FFFFFF, 0x0FFFFFFF, 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; /* * Shift away used bits in the cache data and fill it up with following bits. * Call this when cache buffer does not have enough bits you need. * * Returns 1 if the cache buffer is full. * Returns 0 if the cache buffer is not full; input buffer is empty. */ static int rar_br_fillup(struct archive_read *a, struct rar_br *br) { struct rar *rar = (struct rar *)(a->format->data); int n = CACHE_BITS - br->cache_avail; for (;;) { switch (n >> 3) { case 8: if (br->avail_in >= 8) { br->cache_buffer = ((uint64_t)br->next_in[0]) << 56 | ((uint64_t)br->next_in[1]) << 48 | ((uint64_t)br->next_in[2]) << 40 | ((uint64_t)br->next_in[3]) << 32 | ((uint32_t)br->next_in[4]) << 24 | ((uint32_t)br->next_in[5]) << 16 | ((uint32_t)br->next_in[6]) << 8 | (uint32_t)br->next_in[7]; br->next_in += 8; br->avail_in -= 8; br->cache_avail += 8 * 8; rar->bytes_unconsumed += 8; rar->bytes_remaining -= 8; return (1); } break; case 7: if (br->avail_in >= 7) { br->cache_buffer = (br->cache_buffer << 56) | ((uint64_t)br->next_in[0]) << 48 | ((uint64_t)br->next_in[1]) << 40 | ((uint64_t)br->next_in[2]) << 32 | ((uint32_t)br->next_in[3]) << 24 | ((uint32_t)br->next_in[4]) << 16 | ((uint32_t)br->next_in[5]) << 8 | (uint32_t)br->next_in[6]; br->next_in += 7; br->avail_in -= 7; br->cache_avail += 7 * 8; rar->bytes_unconsumed += 7; rar->bytes_remaining -= 7; return (1); } break; case 6: if (br->avail_in >= 6) { br->cache_buffer = (br->cache_buffer << 48) | ((uint64_t)br->next_in[0]) << 40 | ((uint64_t)br->next_in[1]) << 32 | ((uint32_t)br->next_in[2]) << 24 | ((uint32_t)br->next_in[3]) << 16 | ((uint32_t)br->next_in[4]) << 8 | (uint32_t)br->next_in[5]; br->next_in += 6; br->avail_in -= 6; br->cache_avail += 6 * 8; rar->bytes_unconsumed += 6; rar->bytes_remaining -= 6; return (1); } break; case 0: /* We have enough compressed data in * the cache buffer.*/ return (1); default: break; } if (br->avail_in <= 0) { if (rar->bytes_unconsumed > 0) { /* Consume as much as the decompressor * actually used. 
*/ __archive_read_consume(a, rar->bytes_unconsumed); rar->bytes_unconsumed = 0; } br->next_in = rar_read_ahead(a, 1, &(br->avail_in)); if (br->next_in == NULL) return (0); if (br->avail_in == 0) return (0); } br->cache_buffer = (br->cache_buffer << 8) | *br->next_in++; br->avail_in--; br->cache_avail += 8; n -= 8; rar->bytes_unconsumed++; rar->bytes_remaining--; } } static int rar_br_preparation(struct archive_read *a, struct rar_br *br) { struct rar *rar = (struct rar *)(a->format->data); if (rar->bytes_remaining > 0) { br->next_in = rar_read_ahead(a, 1, &(br->avail_in)); if (br->next_in == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated RAR file data"); return (ARCHIVE_FATAL); } if (br->cache_avail == 0) (void)rar_br_fillup(a, br); } return (ARCHIVE_OK); } /* Find last bit set */ static inline int rar_fls(unsigned int word) { word |= (word >> 1); word |= (word >> 2); word |= (word >> 4); word |= (word >> 8); word |= (word >> 16); return word - (word >> 1); } /* LZSS functions */ static inline int64_t lzss_position(struct lzss *lzss) { return lzss->position; } static inline int lzss_mask(struct lzss *lzss) { return lzss->mask; } static inline int lzss_size(struct lzss *lzss) { return lzss->mask + 1; } static inline int lzss_offset_for_position(struct lzss *lzss, int64_t pos) { return (int)(pos & lzss->mask); } static inline unsigned char * lzss_pointer_for_position(struct lzss *lzss, int64_t pos) { return &lzss->window[lzss_offset_for_position(lzss, pos)]; } static inline int lzss_current_offset(struct lzss *lzss) { return lzss_offset_for_position(lzss, lzss->position); } static inline uint8_t * lzss_current_pointer(struct lzss *lzss) { return lzss_pointer_for_position(lzss, lzss->position); } static inline void lzss_emit_literal(struct rar *rar, uint8_t literal) { *lzss_current_pointer(&rar->lzss) = literal; rar->lzss.position++; } static inline void lzss_emit_match(struct rar *rar, int offset, int length) { int dstoffs = lzss_current_offset(&rar->lzss); int srcoffs = (dstoffs - offset) & lzss_mask(&rar->lzss); int l, li, remaining; unsigned char *d, *s; remaining = length; while (remaining > 0) { l = remaining; if (dstoffs > srcoffs) { if (l > lzss_size(&rar->lzss) - dstoffs) l = lzss_size(&rar->lzss) - dstoffs; } else { if (l > lzss_size(&rar->lzss) - srcoffs) l = lzss_size(&rar->lzss) - srcoffs; } d = &(rar->lzss.window[dstoffs]); s = &(rar->lzss.window[srcoffs]); if ((dstoffs + l < srcoffs) || (srcoffs + l < dstoffs)) memcpy(d, s, l); else { for (li = 0; li < l; li++) d[li] = s[li]; } remaining -= l; dstoffs = (dstoffs + l) & lzss_mask(&(rar->lzss)); srcoffs = (srcoffs + l) & lzss_mask(&(rar->lzss)); } rar->lzss.position += length; } static void * ppmd_alloc(void *p, size_t size) { (void)p; return malloc(size); } static void ppmd_free(void *p, void *address) { (void)p; free(address); } static ISzAlloc g_szalloc = { ppmd_alloc, ppmd_free }; static Byte ppmd_read(void *p) { struct archive_read *a = ((IByteIn*)p)->a; struct rar *rar = (struct rar *)(a->format->data); struct rar_br *br = &(rar->br); Byte b; if (!rar_br_read_ahead(a, br, 8)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated RAR file data"); rar->valid = 0; return 0; } b = rar_br_bits(br, 8); rar_br_consume(br, 8); return b; } int archive_read_support_format_rar(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct rar *rar; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_rar"); rar = (struct rar 
*)malloc(sizeof(*rar)); if (rar == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate rar data"); return (ARCHIVE_FATAL); } memset(rar, 0, sizeof(*rar)); /* * Until enough data has been read, we cannot tell about * any encrypted entries yet. */ rar->has_encrypted_entries = ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW; r = __archive_read_register_format(a, rar, "rar", archive_read_format_rar_bid, archive_read_format_rar_options, archive_read_format_rar_read_header, archive_read_format_rar_read_data, archive_read_format_rar_read_data_skip, archive_read_format_rar_seek_data, archive_read_format_rar_cleanup, archive_read_support_format_rar_capabilities, archive_read_format_rar_has_encrypted_entries); if (r != ARCHIVE_OK) free(rar); return (r); } static int archive_read_support_format_rar_capabilities(struct archive_read * a) { (void)a; /* UNUSED */ return (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA | ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA); } static int archive_read_format_rar_has_encrypted_entries(struct archive_read *_a) { if (_a && _a->format) { struct rar * rar = (struct rar *)_a->format->data; if (rar) { return rar->has_encrypted_entries; } } return ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW; } static int archive_read_format_rar_bid(struct archive_read *a, int best_bid) { const char *p; /* If there's already a bid > 30, we'll never win. */ if (best_bid > 30) return (-1); if ((p = __archive_read_ahead(a, 7, NULL)) == NULL) return (-1); if (memcmp(p, RAR_SIGNATURE, 7) == 0) return (30); if ((p[0] == 'M' && p[1] == 'Z') || memcmp(p, "\x7F\x45LF", 4) == 0) { /* This is a PE file */ ssize_t offset = 0x10000; ssize_t window = 4096; ssize_t bytes_avail; while (offset + window <= (1024 * 128)) { const char *buff = __archive_read_ahead(a, offset + window, &bytes_avail); if (buff == NULL) { /* Remaining bytes are less than window. */ window >>= 1; if (window < 0x40) return (0); continue; } p = buff + offset; while (p + 7 < buff + bytes_avail) { if (memcmp(p, RAR_SIGNATURE, 7) == 0) return (30); p += 0x10; } offset = p - buff; } } return (0); } static int skip_sfx(struct archive_read *a) { const void *h; const char *p, *q; size_t skip, total; ssize_t bytes, window; total = 0; window = 4096; while (total + window <= (1024 * 128)) { h = __archive_read_ahead(a, window, &bytes); if (h == NULL) { /* Remaining bytes are less than window. */ window >>= 1; if (window < 0x40) goto fatal; continue; } if (bytes < 0x40) goto fatal; p = h; q = p + bytes; /* * Scan ahead until we find something that looks * like the RAR header. */ while (p + 7 < q) { if (memcmp(p, RAR_SIGNATURE, 7) == 0) { skip = p - (const char *)h; __archive_read_consume(a, skip); return (ARCHIVE_OK); } p += 0x10; } skip = p - (const char *)h; __archive_read_consume(a, skip); total += skip; } fatal: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Couldn't find out RAR header"); return (ARCHIVE_FATAL); } static int archive_read_format_rar_options(struct archive_read *a, const char *key, const char *val) { struct rar *rar; int ret = ARCHIVE_FAILED; rar = (struct rar *)(a->format->data); if (strcmp(key, "hdrcharset") == 0) { if (val == NULL || val[0] == 0) archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "rar: hdrcharset option needs a character-set name"); else { rar->opt_sconv = archive_string_conversion_from_charset( &a->archive, val, 0); if (rar->opt_sconv != NULL) ret = ARCHIVE_OK; else ret = ARCHIVE_FATAL; } return (ret); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. 
   * It will generate a suitable error if no one used this option. */
  return (ARCHIVE_WARN);
}

static int
archive_read_format_rar_read_header(struct archive_read *a,
                                    struct archive_entry *entry)
{
  const void *h;
  const char *p;
  struct rar *rar;
  size_t skip;
  char head_type;
  int ret;
  unsigned flags;
  unsigned long crc32_expected;

  a->archive.archive_format = ARCHIVE_FORMAT_RAR;
  if (a->archive.archive_format_name == NULL)
    a->archive.archive_format_name = "RAR";

  rar = (struct rar *)(a->format->data);

  /*
   * It should be sufficient to call archive_read_next_header() for
   * a reader to determine if an entry is encrypted or not. If the
   * encryption of an entry is only detectable when calling
   * archive_read_data(), so be it. We'll do the same check there
   * as well.
   */
  if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) {
    rar->has_encrypted_entries = 0;
  }

  /* RAR files can be generated without EOF headers, so return ARCHIVE_EOF if
   * this fails. */
  if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
    return (ARCHIVE_EOF);

  p = h;
  if (rar->found_first_header == 0 &&
     ((p[0] == 'M' && p[1] == 'Z') || memcmp(p, "\x7F\x45LF", 4) == 0)) {
    /* This is an executable? Must be self-extracting... */
    ret = skip_sfx(a);
    if (ret < ARCHIVE_WARN)
      return (ret);
  }
  rar->found_first_header = 1;

  while (1)
  {
    unsigned long crc32_val;

    if ((h = __archive_read_ahead(a, 7, NULL)) == NULL)
      return (ARCHIVE_FATAL);
    p = h;

    head_type = p[2];
    switch(head_type)
    {
    case MARK_HEAD:
      if (memcmp(p, RAR_SIGNATURE, 7) != 0) {
        archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
          "Invalid marker header");
        return (ARCHIVE_FATAL);
      }
      __archive_read_consume(a, 7);
      break;

    case MAIN_HEAD:
      rar->main_flags = archive_le16dec(p + 3);
      skip = archive_le16dec(p + 5);
      if (skip < 7 + sizeof(rar->reserved1) + sizeof(rar->reserved2)) {
        archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
          "Invalid header size");
        return (ARCHIVE_FATAL);
      }
      if ((h = __archive_read_ahead(a, skip, NULL)) == NULL)
        return (ARCHIVE_FATAL);
      p = h;
      memcpy(rar->reserved1, p + 7, sizeof(rar->reserved1));
      memcpy(rar->reserved2, p + 7 + sizeof(rar->reserved1),
             sizeof(rar->reserved2));
      if (rar->main_flags & MHD_ENCRYPTVER) {
        if (skip < 7 + sizeof(rar->reserved1) + sizeof(rar->reserved2)+1) {
          archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
            "Invalid header size");
          return (ARCHIVE_FATAL);
        }
        rar->encryptver = *(p + 7 + sizeof(rar->reserved1) +
                            sizeof(rar->reserved2));
      }

      /* Main header is password encrypted, so we cannot read any
         file names or any other info about files from the header.
*/ if (rar->main_flags & MHD_PASSWORD) { archive_entry_set_is_metadata_encrypted(entry, 1); archive_entry_set_is_data_encrypted(entry, 1); rar->has_encrypted_entries = 1; archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR encryption support unavailable."); return (ARCHIVE_FATAL); } crc32_val = crc32(0, (const unsigned char *)p + 2, (unsigned)skip - 2); if ((crc32_val & 0xffff) != archive_le16dec(p)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return (ARCHIVE_FATAL); } __archive_read_consume(a, skip); break; case FILE_HEAD: return read_header(a, entry, head_type); case COMM_HEAD: case AV_HEAD: case SUB_HEAD: case PROTECT_HEAD: case SIGN_HEAD: case ENDARC_HEAD: flags = archive_le16dec(p + 3); skip = archive_le16dec(p + 5); if (skip < 7) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size too small"); return (ARCHIVE_FATAL); } if (flags & HD_ADD_SIZE_PRESENT) { if (skip < 7 + 4) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size too small"); return (ARCHIVE_FATAL); } if ((h = __archive_read_ahead(a, skip, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; skip += archive_le32dec(p + 7); } /* Skip over the 2-byte CRC at the beginning of the header. */ crc32_expected = archive_le16dec(p); __archive_read_consume(a, 2); skip -= 2; /* Skim the entire header and compute the CRC. */ crc32_val = 0; while (skip > 0) { size_t to_read = skip; ssize_t did_read; if (to_read > 32 * 1024) { to_read = 32 * 1024; } if ((h = __archive_read_ahead(a, to_read, &did_read)) == NULL) { return (ARCHIVE_FATAL); } p = h; crc32_val = crc32(crc32_val, (const unsigned char *)p, (unsigned)did_read); __archive_read_consume(a, did_read); skip -= did_read; } if ((crc32_val & 0xffff) != crc32_expected) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return (ARCHIVE_FATAL); } if (head_type == ENDARC_HEAD) return (ARCHIVE_EOF); break; case NEWSUB_HEAD: if ((ret = read_header(a, entry, head_type)) < ARCHIVE_WARN) return ret; break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Bad RAR file"); return (ARCHIVE_FATAL); } } } static int archive_read_format_rar_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct rar *rar = (struct rar *)(a->format->data); int ret; if (rar->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) { rar->has_encrypted_entries = 0; } if (rar->bytes_unconsumed > 0) { /* Consume as much as the decompressor actually used. 
*/ __archive_read_consume(a, rar->bytes_unconsumed); rar->bytes_unconsumed = 0; } *buff = NULL; if (rar->entry_eof || rar->offset_seek >= rar->unp_size) { *size = 0; *offset = rar->offset; if (*offset < rar->unp_size) *offset = rar->unp_size; return (ARCHIVE_EOF); } switch (rar->compression_method) { case COMPRESS_METHOD_STORE: ret = read_data_stored(a, buff, size, offset); break; case COMPRESS_METHOD_FASTEST: case COMPRESS_METHOD_FAST: case COMPRESS_METHOD_NORMAL: case COMPRESS_METHOD_GOOD: case COMPRESS_METHOD_BEST: ret = read_data_compressed(a, buff, size, offset); if (ret != ARCHIVE_OK && ret != ARCHIVE_WARN) __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc); break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unsupported compression method for RAR file."); ret = ARCHIVE_FATAL; break; } return (ret); } static int archive_read_format_rar_read_data_skip(struct archive_read *a) { struct rar *rar; int64_t bytes_skipped; int ret; rar = (struct rar *)(a->format->data); if (rar->bytes_unconsumed > 0) { /* Consume as much as the decompressor actually used. */ __archive_read_consume(a, rar->bytes_unconsumed); rar->bytes_unconsumed = 0; } if (rar->bytes_remaining > 0) { bytes_skipped = __archive_read_consume(a, rar->bytes_remaining); if (bytes_skipped < 0) return (ARCHIVE_FATAL); } /* Compressed data to skip must be read from each header in a multivolume * archive. */ if (rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER) { ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) ret = archive_read_format_rar_read_header(a, a->entry); if (ret != (ARCHIVE_OK)) return ret; return archive_read_format_rar_read_data_skip(a); } return (ARCHIVE_OK); } static int64_t archive_read_format_rar_seek_data(struct archive_read *a, int64_t offset, int whence) { int64_t client_offset, ret; unsigned int i; struct rar *rar = (struct rar *)(a->format->data); if (rar->compression_method == COMPRESS_METHOD_STORE) { /* Modify the offset for use with SEEK_SET */ switch (whence) { case SEEK_CUR: client_offset = rar->offset_seek; break; case SEEK_END: client_offset = rar->unp_size; break; case SEEK_SET: default: client_offset = 0; } client_offset += offset; if (client_offset < 0) { /* Can't seek past beginning of data block */ return -1; } else if (client_offset > rar->unp_size) { /* * Set the returned offset but only seek to the end of * the data block. 
*/ rar->offset_seek = client_offset; client_offset = rar->unp_size; } client_offset += rar->dbo[0].start_offset; i = 0; while (i < rar->cursor) { i++; client_offset += rar->dbo[i].start_offset - rar->dbo[i-1].end_offset; } if (rar->main_flags & MHD_VOLUME) { /* Find the appropriate offset among the multivolume archive */ while (1) { if (client_offset < rar->dbo[rar->cursor].start_offset && rar->file_flags & FHD_SPLIT_BEFORE) { /* Search backwards for the correct data block */ if (rar->cursor == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Attempt to seek past beginning of RAR data block"); return (ARCHIVE_FAILED); } rar->cursor--; client_offset -= rar->dbo[rar->cursor+1].start_offset - rar->dbo[rar->cursor].end_offset; if (client_offset < rar->dbo[rar->cursor].start_offset) continue; ret = __archive_read_seek(a, rar->dbo[rar->cursor].start_offset - rar->dbo[rar->cursor].header_size, SEEK_SET); if (ret < (ARCHIVE_OK)) return ret; ret = archive_read_format_rar_read_header(a, a->entry); if (ret != (ARCHIVE_OK)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Error during seek of RAR file"); return (ARCHIVE_FAILED); } rar->cursor--; break; } else if (client_offset > rar->dbo[rar->cursor].end_offset && rar->file_flags & FHD_SPLIT_AFTER) { /* Search forward for the correct data block */ rar->cursor++; if (rar->cursor < rar->nodes && client_offset > rar->dbo[rar->cursor].end_offset) { client_offset += rar->dbo[rar->cursor].start_offset - rar->dbo[rar->cursor-1].end_offset; continue; } rar->cursor--; ret = __archive_read_seek(a, rar->dbo[rar->cursor].end_offset, SEEK_SET); if (ret < (ARCHIVE_OK)) return ret; ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) { rar->has_endarc_header = 1; ret = archive_read_format_rar_read_header(a, a->entry); } if (ret != (ARCHIVE_OK)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Error during seek of RAR file"); return (ARCHIVE_FAILED); } client_offset += rar->dbo[rar->cursor].start_offset - rar->dbo[rar->cursor-1].end_offset; continue; } break; } } ret = __archive_read_seek(a, client_offset, SEEK_SET); if (ret < (ARCHIVE_OK)) return ret; rar->bytes_remaining = rar->dbo[rar->cursor].end_offset - ret; i = rar->cursor; while (i > 0) { i--; ret -= rar->dbo[i+1].start_offset - rar->dbo[i].end_offset; } ret -= rar->dbo[0].start_offset; /* Always restart reading the file after a seek */ __archive_reset_read_data(&a->archive); rar->bytes_unconsumed = 0; rar->offset = 0; /* * If a seek past the end of file was requested, return the requested * offset. 
*/ if (ret == rar->unp_size && rar->offset_seek > rar->unp_size) return rar->offset_seek; /* Return the new offset */ rar->offset_seek = ret; return rar->offset_seek; } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Seeking of compressed RAR files is unsupported"); } return (ARCHIVE_FAILED); } static int archive_read_format_rar_cleanup(struct archive_read *a) { struct rar *rar; rar = (struct rar *)(a->format->data); free_codes(a); free(rar->filename); free(rar->filename_save); free(rar->dbo); free(rar->unp_buffer); free(rar->lzss.window); __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc); free(rar); (a->format->data) = NULL; return (ARCHIVE_OK); } static int read_header(struct archive_read *a, struct archive_entry *entry, char head_type) { const void *h; const char *p, *endp; struct rar *rar; struct rar_header rar_header; struct rar_file_header file_header; int64_t header_size; unsigned filename_size, end; char *filename; char *strp; char packed_size[8]; char unp_size[8]; int ttime; struct archive_string_conv *sconv, *fn_sconv; unsigned long crc32_val; int ret = (ARCHIVE_OK), ret2; rar = (struct rar *)(a->format->data); /* Setup a string conversion object for non-rar-unicode filenames. */ sconv = rar->opt_sconv; if (sconv == NULL) { if (!rar->init_default_conversion) { rar->sconv_default = archive_string_default_conversion_for_read( &(a->archive)); rar->init_default_conversion = 1; } sconv = rar->sconv_default; } if ((h = __archive_read_ahead(a, 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; memcpy(&rar_header, p, sizeof(rar_header)); rar->file_flags = archive_le16dec(rar_header.flags); header_size = archive_le16dec(rar_header.size); if (header_size < (int64_t)sizeof(file_header) + 7) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2); __archive_read_consume(a, 7); if (!(rar->file_flags & FHD_SOLID)) { rar->compression_method = 0; rar->packed_size = 0; rar->unp_size = 0; rar->mtime = 0; rar->ctime = 0; rar->atime = 0; rar->arctime = 0; rar->mode = 0; memset(&rar->salt, 0, sizeof(rar->salt)); rar->atime = 0; rar->ansec = 0; rar->ctime = 0; rar->cnsec = 0; rar->mtime = 0; rar->mnsec = 0; rar->arctime = 0; rar->arcnsec = 0; } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR solid archive support unavailable."); return (ARCHIVE_FATAL); } if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); /* File Header CRC check. */ crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7)); if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return (ARCHIVE_FATAL); } /* If no CRC error, Go on parsing File Header. */ p = h; endp = p + header_size - 7; memcpy(&file_header, p, sizeof(file_header)); p += sizeof(file_header); rar->compression_method = file_header.method; ttime = archive_le32dec(file_header.file_time); rar->mtime = get_time(ttime); rar->file_crc = archive_le32dec(file_header.file_crc); if (rar->file_flags & FHD_PASSWORD) { archive_entry_set_is_data_encrypted(entry, 1); rar->has_encrypted_entries = 1; archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR encryption support unavailable."); /* Since it is only the data part itself that is encrypted we can at least extract information about the currently processed entry and don't need to return ARCHIVE_FATAL here. 
*/ /*return (ARCHIVE_FATAL);*/ } if (rar->file_flags & FHD_LARGE) { memcpy(packed_size, file_header.pack_size, 4); memcpy(packed_size + 4, p, 4); /* High pack size */ p += 4; memcpy(unp_size, file_header.unp_size, 4); memcpy(unp_size + 4, p, 4); /* High unpack size */ p += 4; rar->packed_size = archive_le64dec(&packed_size); rar->unp_size = archive_le64dec(&unp_size); } else { rar->packed_size = archive_le32dec(file_header.pack_size); rar->unp_size = archive_le32dec(file_header.unp_size); } if (rar->packed_size < 0 || rar->unp_size < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid sizes specified."); return (ARCHIVE_FATAL); } rar->bytes_remaining = rar->packed_size; /* TODO: RARv3 subblocks contain comments. For now the complete block is * consumed at the end. */ if (head_type == NEWSUB_HEAD) { size_t distance = p - (const char *)h; header_size += rar->packed_size; /* Make sure we have the extended data. */ if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; endp = p + header_size - 7; p += distance; } filename_size = archive_le16dec(file_header.name_size); if (p + filename_size > endp) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename size"); return (ARCHIVE_FATAL); } if (rar->filename_allocated < filename_size * 2 + 2) { char *newptr; size_t newsize = filename_size * 2 + 2; newptr = realloc(rar->filename, newsize); if (newptr == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->filename = newptr; rar->filename_allocated = newsize; } filename = rar->filename; memcpy(filename, p, filename_size); filename[filename_size] = '\0'; if (rar->file_flags & FHD_UNICODE) { if (filename_size != strlen(filename)) { unsigned char highbyte, flagbits, flagbyte; unsigned fn_end, offset; end = filename_size; fn_end = filename_size * 2; filename_size = 0; offset = (unsigned)strlen(filename) + 1; highbyte = *(p + offset++); flagbits = 0; flagbyte = 0; while (offset < end && filename_size < fn_end) { if (!flagbits) { flagbyte = *(p + offset++); flagbits = 8; } flagbits -= 2; switch((flagbyte >> flagbits) & 3) { case 0: filename[filename_size++] = '\0'; filename[filename_size++] = *(p + offset++); break; case 1: filename[filename_size++] = highbyte; filename[filename_size++] = *(p + offset++); break; case 2: filename[filename_size++] = *(p + offset + 1); filename[filename_size++] = *(p + offset); offset += 2; break; case 3: { char extra, high; uint8_t length = *(p + offset++); if (length & 0x80) { extra = *(p + offset++); high = (char)highbyte; } else extra = high = 0; length = (length & 0x7f) + 2; while (length && filename_size < fn_end) { unsigned cp = filename_size >> 1; filename[filename_size++] = high; filename[filename_size++] = p[cp] + extra; length--; } } break; } } if (filename_size > fn_end) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename"); return (ARCHIVE_FATAL); } filename[filename_size++] = '\0'; filename[filename_size++] = '\0'; /* Decoded unicode form is UTF-16BE, so we have to update a string * conversion object for it. 
       */
      if (rar->sconv_utf16be == NULL) {
        rar->sconv_utf16be = archive_string_conversion_from_charset(
           &a->archive, "UTF-16BE", 1);
        if (rar->sconv_utf16be == NULL)
          return (ARCHIVE_FATAL);
      }
      fn_sconv = rar->sconv_utf16be;

      strp = filename;
      while (memcmp(strp, "\x00\x00", 2))
      {
        if (!memcmp(strp, "\x00\\", 2))
          *(strp + 1) = '/';
        strp += 2;
      }
      p += offset;
    } else {
      /*
       * If FHD_UNICODE is set but no unicode data, this file name form
       * is UTF-8, so we have to update a string conversion object for
       * it accordingly.
       */
      if (rar->sconv_utf8 == NULL) {
        rar->sconv_utf8 = archive_string_conversion_from_charset(
           &a->archive, "UTF-8", 1);
        if (rar->sconv_utf8 == NULL)
          return (ARCHIVE_FATAL);
      }
      fn_sconv = rar->sconv_utf8;
      while ((strp = strchr(filename, '\\')) != NULL)
        *strp = '/';
      p += filename_size;
    }
  }
  else
  {
    fn_sconv = sconv;
    while ((strp = strchr(filename, '\\')) != NULL)
      *strp = '/';
    p += filename_size;
  }

  /* Split file in multivolume RAR. No more need to process header. */
  if (rar->filename_save &&
    filename_size == rar->filename_save_size &&
    !memcmp(rar->filename, rar->filename_save, filename_size + 1))
  {
    __archive_read_consume(a, header_size - 7);
    rar->cursor++;
    if (rar->cursor >= rar->nodes)
    {
      rar->nodes++;
      if ((rar->dbo =
        realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL)
      {
        archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
        return (ARCHIVE_FATAL);
      }
      rar->dbo[rar->cursor].header_size = header_size;
      rar->dbo[rar->cursor].start_offset = -1;
      rar->dbo[rar->cursor].end_offset = -1;
    }
    if (rar->dbo[rar->cursor].start_offset < 0)
    {
      rar->dbo[rar->cursor].start_offset = a->filter->position;
      rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset +
        rar->packed_size;
    }
    return ret;
  }

  {
    /* Keep a copy of the filename for the multivolume-split check above;
     * bail out if the allocation fails rather than dereferencing NULL. */
    char *newptr = (char *)realloc(rar->filename_save, filename_size + 1);
    if (newptr == NULL)
    {
      archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
      return (ARCHIVE_FATAL);
    }
    rar->filename_save = newptr;
  }
  memcpy(rar->filename_save, rar->filename, filename_size + 1);
  rar->filename_save_size = filename_size;

  /* Set info for seeking */
  free(rar->dbo);
  if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL)
  {
    archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory.");
    return (ARCHIVE_FATAL);
  }
  rar->dbo[0].header_size = header_size;
  rar->dbo[0].start_offset = -1;
  rar->dbo[0].end_offset = -1;
  rar->cursor = 0;
  rar->nodes = 1;

  if (rar->file_flags & FHD_SALT)
  {
    if (p + 8 > endp) {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
        "Invalid header size");
      return (ARCHIVE_FATAL);
    }
    memcpy(rar->salt, p, 8);
    p += 8;
  }

  if (rar->file_flags & FHD_EXTTIME) {
    if (read_exttime(p, rar, endp) < 0) {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
        "Invalid header size");
      return (ARCHIVE_FATAL);
    }
  }

  __archive_read_consume(a, header_size - 7);
  rar->dbo[0].start_offset = a->filter->position;
  rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size;

  switch(file_header.host_os)
  {
  case OS_MSDOS:
  case OS_OS2:
  case OS_WIN32:
    rar->mode = archive_le32dec(file_header.file_attr);
    if (rar->mode & FILE_ATTRIBUTE_DIRECTORY)
      rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH;
    else
      rar->mode = AE_IFREG;
    rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
    break;

  case OS_UNIX:
  case OS_MAC_OS:
  case OS_BEOS:
    rar->mode = archive_le32dec(file_header.file_attr);
    break;

  default:
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Unknown file attributes from RAR file's host OS");
    return (ARCHIVE_FATAL);
  }

  rar->bytes_uncopied = rar->bytes_unconsumed = 0;
  rar->lzss.position = rar->offset = 0;
  rar->offset_seek = 0;
  rar->dictionary_size = 0;
  rar->offset_outgoing = 0;
  rar->br.cache_avail = 0;
  rar->br.avail_in = 0;
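  /*
   * Explanatory sketch (added commentary, not from the original source):
   * the dbo[] table filled in above is what
   * archive_read_format_rar_seek_data() walks to translate an offset within
   * the stored data into a raw file position across split/multivolume
   * blocks. The hypothetical helper below mirrors the arithmetic used
   * there; it is illustration only and is excluded from compilation.
   */
#if 0
  static int64_t
  illustrate_stored_to_raw(const struct rar *rar, int64_t client_offset)
  {
    unsigned int i;
    /* Start of the first data block in the file... */
    int64_t raw = client_offset + rar->dbo[0].start_offset;
    /* ...plus each inter-block gap (the headers between data blocks), so
     * stored data can be seeked as if it were contiguous. */
    for (i = 1; i <= rar->cursor; i++)
      raw += rar->dbo[i].start_offset - rar->dbo[i-1].end_offset;
    return raw;
  }
#endif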
  rar->crc_calculated = 0;
  rar->entry_eof = 0;
  rar->valid = 1;
  rar->is_ppmd_block = 0;
  rar->start_new_table = 1;
  free(rar->unp_buffer);
  rar->unp_buffer = NULL;
  rar->unp_offset = 0;
  rar->unp_buffer_size = UNP_BUFFER_SIZE;
  memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
  __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc);
  rar->ppmd_valid = rar->ppmd_eod = 0;

  /* Don't set any archive entries for non-file header types */
  if (head_type == NEWSUB_HEAD)
    return ret;

  archive_entry_set_mtime(entry, rar->mtime, rar->mnsec);
  archive_entry_set_ctime(entry, rar->ctime, rar->cnsec);
  archive_entry_set_atime(entry, rar->atime, rar->ansec);
  archive_entry_set_size(entry, rar->unp_size);
  archive_entry_set_mode(entry, rar->mode);

  if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv))
  {
    if (errno == ENOMEM)
    {
      archive_set_error(&a->archive, ENOMEM,
                        "Can't allocate memory for Pathname");
      return (ARCHIVE_FATAL);
    }
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Pathname cannot be converted from %s to current locale.",
                      archive_string_conversion_charset_name(fn_sconv));
    ret = (ARCHIVE_WARN);
  }

  if (((rar->mode) & AE_IFMT) == AE_IFLNK)
  {
    /* Make sure a symbolic-link file does not have its body. */
    rar->bytes_remaining = 0;
    archive_entry_set_size(entry, 0);

    /* Read a symbolic-link name. */
    if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN))
      return ret2;
    if (ret > ret2)
      ret = ret2;
  }

  if (rar->bytes_remaining == 0)
    rar->entry_eof = 1;

  return ret;
}

static time_t
get_time(int ttime)
{
  struct tm tm;
  tm.tm_sec = 2 * (ttime & 0x1f);
  tm.tm_min = (ttime >> 5) & 0x3f;
  tm.tm_hour = (ttime >> 11) & 0x1f;
  tm.tm_mday = (ttime >> 16) & 0x1f;
  tm.tm_mon = ((ttime >> 21) & 0x0f) - 1;
  tm.tm_year = ((ttime >> 25) & 0x7f) + 80;
  tm.tm_isdst = -1;
  return mktime(&tm);
}

static int
read_exttime(const char *p, struct rar *rar, const char *endp)
{
  unsigned rmode, flags, rem, j, count;
  int ttime, i;
  struct tm *tm;
  time_t t;
  long nsec;

  if (p + 2 > endp)
    return (-1);
  flags = archive_le16dec(p);
  p += 2;

  for (i = 3; i >= 0; i--)
  {
    t = 0;
    if (i == 3)
      t = rar->mtime;
    rmode = flags >> i * 4;
    if (rmode & 8)
    {
      if (!t)
      {
        if (p + 4 > endp)
          return (-1);
        ttime = archive_le32dec(p);
        t = get_time(ttime);
        p += 4;
      }
      rem = 0;
      count = rmode & 3;
      if (p + count > endp)
        return (-1);
      for (j = 0; j < count; j++)
      {
        /* Read the byte as unsigned before shifting; a plain char may be
         * signed, and sign extension would corrupt the accumulated
         * fractional-seconds value. */
        rem = (((unsigned)(unsigned char)*p) << 16) | (rem >> 8);
        p++;
      }
      tm = localtime(&t);
      if (tm == NULL)
        return (-1);
      nsec = tm->tm_sec + rem / NS_UNIT;
      if (rmode & 4)
      {
        tm->tm_sec++;
        t = mktime(tm);
      }
      if (i == 3)
      {
        rar->mtime = t;
        rar->mnsec = nsec;
      }
      else if (i == 2)
      {
        rar->ctime = t;
        rar->cnsec = nsec;
      }
      else if (i == 1)
      {
        rar->atime = t;
        rar->ansec = nsec;
      }
      else
      {
        rar->arctime = t;
        rar->arcnsec = nsec;
      }
    }
  }
  return (0);
}

static int
read_symlink_stored(struct archive_read *a, struct archive_entry *entry,
                    struct archive_string_conv *sconv)
{
  const void *h;
  const char *p;
  struct rar *rar;
  int ret = (ARCHIVE_OK);

  rar = (struct rar *)(a->format->data);
  if ((h = rar_read_ahead(a, (size_t)rar->packed_size, NULL)) == NULL)
    return (ARCHIVE_FATAL);
  p = h;

  if (archive_entry_copy_symlink_l(entry,
      p, (size_t)rar->packed_size, sconv))
  {
    if (errno == ENOMEM)
    {
      archive_set_error(&a->archive, ENOMEM,
                        "Can't allocate memory for link");
      return (ARCHIVE_FATAL);
    }
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "link cannot be converted from %s to current locale.",
                      archive_string_conversion_charset_name(sconv));
    ret = (ARCHIVE_WARN);
  }
  __archive_read_consume(a, rar->packed_size);
  return ret;
}

static int
read_data_stored(struct archive_read *a, const
void **buff, size_t *size, int64_t *offset) { struct rar *rar; ssize_t bytes_avail; rar = (struct rar *)(a->format->data); if (rar->bytes_remaining == 0 && !(rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER)) { *buff = NULL; *size = 0; *offset = rar->offset; if (rar->file_crc != rar->crc_calculated) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "File CRC error"); return (ARCHIVE_FATAL); } rar->entry_eof = 1; return (ARCHIVE_EOF); } *buff = rar_read_ahead(a, 1, &bytes_avail); if (bytes_avail <= 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated RAR file data"); return (ARCHIVE_FATAL); } *size = bytes_avail; *offset = rar->offset; rar->offset += bytes_avail; rar->offset_seek += bytes_avail; rar->bytes_remaining -= bytes_avail; rar->bytes_unconsumed = bytes_avail; /* Calculate File CRC. */ rar->crc_calculated = crc32(rar->crc_calculated, *buff, (unsigned)bytes_avail); return (ARCHIVE_OK); } static int read_data_compressed(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct rar *rar; int64_t start, end, actualend; size_t bs; int ret = (ARCHIVE_OK), sym, code, lzss_offset, length, i; rar = (struct rar *)(a->format->data); do { if (!rar->valid) return (ARCHIVE_FATAL); if (rar->ppmd_eod || (rar->dictionary_size && rar->offset >= rar->unp_size)) { if (rar->unp_offset > 0) { /* * We have unprocessed extracted data. write it out. */ *buff = rar->unp_buffer; *size = rar->unp_offset; *offset = rar->offset_outgoing; rar->offset_outgoing += *size; /* Calculate File CRC. */ rar->crc_calculated = crc32(rar->crc_calculated, *buff, (unsigned)*size); rar->unp_offset = 0; return (ARCHIVE_OK); } *buff = NULL; *size = 0; *offset = rar->offset; if (rar->file_crc != rar->crc_calculated) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "File CRC error"); return (ARCHIVE_FATAL); } rar->entry_eof = 1; return (ARCHIVE_EOF); } if (!rar->is_ppmd_block && rar->dictionary_size && rar->bytes_uncopied > 0) { if (rar->bytes_uncopied > (rar->unp_buffer_size - rar->unp_offset)) bs = rar->unp_buffer_size - rar->unp_offset; else bs = (size_t)rar->bytes_uncopied; ret = copy_from_lzss_window(a, buff, rar->offset, (int)bs); if (ret != ARCHIVE_OK) return (ret); rar->offset += bs; rar->bytes_uncopied -= bs; if (*buff != NULL) { rar->unp_offset = 0; *size = rar->unp_buffer_size; *offset = rar->offset_outgoing; rar->offset_outgoing += *size; /* Calculate File CRC. */ rar->crc_calculated = crc32(rar->crc_calculated, *buff, (unsigned)*size); return (ret); } continue; } if (!rar->br.next_in && (ret = rar_br_preparation(a, &(rar->br))) < ARCHIVE_WARN) return (ret); if (rar->start_new_table && ((ret = parse_codes(a)) < (ARCHIVE_WARN))) return (ret); if (rar->is_ppmd_block) { if ((sym = __archive_ppmd7_functions.Ppmd7_DecodeSymbol( &rar->ppmd7_context, &rar->range_dec.p)) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid symbol"); return (ARCHIVE_FATAL); } if(sym != rar->ppmd_escape) { lzss_emit_literal(rar, sym); rar->bytes_uncopied++; } else { if ((code = __archive_ppmd7_functions.Ppmd7_DecodeSymbol( &rar->ppmd7_context, &rar->range_dec.p)) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid symbol"); return (ARCHIVE_FATAL); } switch(code) { case 0: rar->start_new_table = 1; return read_data_compressed(a, buff, size, offset); case 2: rar->ppmd_eod = 1;/* End Of ppmd Data. 
*/ continue; case 3: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Parsing filters is unsupported."); return (ARCHIVE_FAILED); case 4: lzss_offset = 0; for (i = 2; i >= 0; i--) { if ((code = __archive_ppmd7_functions.Ppmd7_DecodeSymbol( &rar->ppmd7_context, &rar->range_dec.p)) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid symbol"); return (ARCHIVE_FATAL); } lzss_offset |= code << (i * 8); } if ((length = __archive_ppmd7_functions.Ppmd7_DecodeSymbol( &rar->ppmd7_context, &rar->range_dec.p)) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid symbol"); return (ARCHIVE_FATAL); } lzss_emit_match(rar, lzss_offset + 2, length + 32); rar->bytes_uncopied += length + 32; break; case 5: if ((length = __archive_ppmd7_functions.Ppmd7_DecodeSymbol( &rar->ppmd7_context, &rar->range_dec.p)) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid symbol"); return (ARCHIVE_FATAL); } lzss_emit_match(rar, 1, length + 4); rar->bytes_uncopied += length + 4; break; default: lzss_emit_literal(rar, sym); rar->bytes_uncopied++; } } } else { start = rar->offset; end = start + rar->dictionary_size; rar->filterstart = INT64_MAX; if ((actualend = expand(a, end)) < 0) return ((int)actualend); rar->bytes_uncopied = actualend - start; if (rar->bytes_uncopied == 0) { /* Broken RAR files cause this case. * NOTE: If this case were possible on a normal RAR file * we would find out where it was actually bad and * what we would do to solve it. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Internal error extracting RAR file"); return (ARCHIVE_FATAL); } } if (rar->bytes_uncopied > (rar->unp_buffer_size - rar->unp_offset)) bs = rar->unp_buffer_size - rar->unp_offset; else bs = (size_t)rar->bytes_uncopied; ret = copy_from_lzss_window(a, buff, rar->offset, (int)bs); if (ret != ARCHIVE_OK) return (ret); rar->offset += bs; rar->bytes_uncopied -= bs; /* * If *buff is NULL, it means unp_buffer is not full. * So we have to continue extracting a RAR file. */ } while (*buff == NULL); rar->unp_offset = 0; *size = rar->unp_buffer_size; *offset = rar->offset_outgoing; rar->offset_outgoing += *size; /* Calculate File CRC. 
*/ rar->crc_calculated = crc32(rar->crc_calculated, *buff, (unsigned)*size); return ret; } static int parse_codes(struct archive_read *a) { int i, j, val, n, r; unsigned char bitlengths[MAX_SYMBOLS], zerocount, ppmd_flags; unsigned int maxorder; struct huffman_code precode; struct rar *rar = (struct rar *)(a->format->data); struct rar_br *br = &(rar->br); free_codes(a); /* Skip to the next byte */ rar_br_consume_unalined_bits(br); /* PPMd block flag */ if (!rar_br_read_ahead(a, br, 1)) goto truncated_data; if ((rar->is_ppmd_block = rar_br_bits(br, 1)) != 0) { rar_br_consume(br, 1); if (!rar_br_read_ahead(a, br, 7)) goto truncated_data; ppmd_flags = rar_br_bits(br, 7); rar_br_consume(br, 7); /* Memory is allocated in MB */ if (ppmd_flags & 0x20) { if (!rar_br_read_ahead(a, br, 8)) goto truncated_data; rar->dictionary_size = (rar_br_bits(br, 8) + 1) << 20; rar_br_consume(br, 8); } if (ppmd_flags & 0x40) { if (!rar_br_read_ahead(a, br, 8)) goto truncated_data; rar->ppmd_escape = rar->ppmd7_context.InitEsc = rar_br_bits(br, 8); rar_br_consume(br, 8); } else rar->ppmd_escape = 2; if (ppmd_flags & 0x20) { maxorder = (ppmd_flags & 0x1F) + 1; if(maxorder > 16) maxorder = 16 + (maxorder - 16) * 3; if (maxorder == 1) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated RAR file data"); return (ARCHIVE_FATAL); } /* Make sure ppmd7_contest is freed before Ppmd7_Construct * because reading a broken file cause this abnormal sequence. */ __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc); rar->bytein.a = a; rar->bytein.Read = &ppmd_read; __archive_ppmd7_functions.PpmdRAR_RangeDec_CreateVTable(&rar->range_dec); rar->range_dec.Stream = &rar->bytein; __archive_ppmd7_functions.Ppmd7_Construct(&rar->ppmd7_context); if (!__archive_ppmd7_functions.Ppmd7_Alloc(&rar->ppmd7_context, rar->dictionary_size, &g_szalloc)) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unable to initialize PPMd range decoder"); return (ARCHIVE_FATAL); } __archive_ppmd7_functions.Ppmd7_Init(&rar->ppmd7_context, maxorder); rar->ppmd_valid = 1; } else { if (!rar->ppmd_valid) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid PPMd sequence"); return (ARCHIVE_FATAL); } if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unable to initialize PPMd range decoder"); return (ARCHIVE_FATAL); } } } else { rar_br_consume(br, 1); /* Keep existing table flag */ if (!rar_br_read_ahead(a, br, 1)) goto truncated_data; if (!rar_br_bits(br, 1)) memset(rar->lengthtable, 0, sizeof(rar->lengthtable)); rar_br_consume(br, 1); memset(&bitlengths, 0, sizeof(bitlengths)); for (i = 0; i < MAX_SYMBOLS;) { if (!rar_br_read_ahead(a, br, 4)) goto truncated_data; bitlengths[i++] = rar_br_bits(br, 4); rar_br_consume(br, 4); if (bitlengths[i-1] == 0xF) { if (!rar_br_read_ahead(a, br, 4)) goto truncated_data; zerocount = rar_br_bits(br, 4); rar_br_consume(br, 4); if (zerocount) { i--; for (j = 0; j < zerocount + 2 && i < MAX_SYMBOLS; j++) bitlengths[i++] = 0; } } } memset(&precode, 0, sizeof(precode)); r = create_code(a, &precode, bitlengths, MAX_SYMBOLS, MAX_SYMBOL_LENGTH); if (r != ARCHIVE_OK) { free(precode.tree); free(precode.table); return (r); } for (i = 0; i < HUFFMAN_TABLE_SIZE;) { if ((val = read_next_symbol(a, &precode)) < 0) { free(precode.tree); 
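      /*
       * Note on the loop around this error path (an explanatory worked
       * example, added commentary, not from the original source): the
       * 20-symbol precode decoded by read_next_symbol() above run-length
       * encodes the 404-entry lengthtable. A value of 0-15 is added
       * (mod 16) to the current entry; 16 repeats the previous length
       * 3-10 times (3 extra bits, n = bits + 3); 17 repeats it 11-138
       * times (7 extra bits, n = bits + 11); 18 and 19 emit runs of zero
       * lengths with the same bit widths. For example, the symbol
       * sequence {4, 16 with extra bits 101b} decodes to one length of 4
       * followed by 5 + 3 = 8 more copies of 4.
       */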
free(precode.table); return (ARCHIVE_FATAL); } if (val < 16) { rar->lengthtable[i] = (rar->lengthtable[i] + val) & 0xF; i++; } else if (val < 18) { if (i == 0) { free(precode.tree); free(precode.table); archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Internal error extracting RAR file."); return (ARCHIVE_FATAL); } if(val == 16) { if (!rar_br_read_ahead(a, br, 3)) { free(precode.tree); free(precode.table); goto truncated_data; } n = rar_br_bits(br, 3) + 3; rar_br_consume(br, 3); } else { if (!rar_br_read_ahead(a, br, 7)) { free(precode.tree); free(precode.table); goto truncated_data; } n = rar_br_bits(br, 7) + 11; rar_br_consume(br, 7); } for (j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++) { rar->lengthtable[i] = rar->lengthtable[i-1]; i++; } } else { if(val == 18) { if (!rar_br_read_ahead(a, br, 3)) { free(precode.tree); free(precode.table); goto truncated_data; } n = rar_br_bits(br, 3) + 3; rar_br_consume(br, 3); } else { if (!rar_br_read_ahead(a, br, 7)) { free(precode.tree); free(precode.table); goto truncated_data; } n = rar_br_bits(br, 7) + 11; rar_br_consume(br, 7); } for(j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++) rar->lengthtable[i++] = 0; } } free(precode.tree); free(precode.table); r = create_code(a, &rar->maincode, &rar->lengthtable[0], MAINCODE_SIZE, MAX_SYMBOL_LENGTH); if (r != ARCHIVE_OK) return (r); r = create_code(a, &rar->offsetcode, &rar->lengthtable[MAINCODE_SIZE], OFFSETCODE_SIZE, MAX_SYMBOL_LENGTH); if (r != ARCHIVE_OK) return (r); r = create_code(a, &rar->lowoffsetcode, &rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE], LOWOFFSETCODE_SIZE, MAX_SYMBOL_LENGTH); if (r != ARCHIVE_OK) return (r); r = create_code(a, &rar->lengthcode, &rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE + LOWOFFSETCODE_SIZE], LENGTHCODE_SIZE, MAX_SYMBOL_LENGTH); if (r != ARCHIVE_OK) return (r); } if (!rar->dictionary_size || !rar->lzss.window) { /* Seems as though dictionary sizes are not used. Even so, minimize * memory usage as much as possible. 
     */
    void *new_window;
    unsigned int new_size;

    if (rar->unp_size >= DICTIONARY_MAX_SIZE)
      new_size = DICTIONARY_MAX_SIZE;
    else
      new_size = rar_fls((unsigned int)rar->unp_size) << 1;
    /* A zero window size (e.g. from an empty entry) would leave lzss.mask
     * at -1 and permit out-of-bounds window accesses; reject it. */
    if (new_size == 0)
    {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                        "Zero window size is invalid.");
      return (ARCHIVE_FATAL);
    }
    new_window = realloc(rar->lzss.window, new_size);
    if (new_window == NULL) {
      archive_set_error(&a->archive, ENOMEM,
                        "Unable to allocate memory for uncompressed data.");
      return (ARCHIVE_FATAL);
    }
    rar->lzss.window = (unsigned char *)new_window;
    rar->dictionary_size = new_size;
    memset(rar->lzss.window, 0, rar->dictionary_size);
    rar->lzss.mask = rar->dictionary_size - 1;
  }

  rar->start_new_table = 0;
  return (ARCHIVE_OK);
truncated_data:
  archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                    "Truncated RAR file data");
  rar->valid = 0;
  return (ARCHIVE_FATAL);
}

static void
free_codes(struct archive_read *a)
{
  struct rar *rar = (struct rar *)(a->format->data);
  free(rar->maincode.tree);
  free(rar->offsetcode.tree);
  free(rar->lowoffsetcode.tree);
  free(rar->lengthcode.tree);
  free(rar->maincode.table);
  free(rar->offsetcode.table);
  free(rar->lowoffsetcode.table);
  free(rar->lengthcode.table);
  memset(&rar->maincode, 0, sizeof(rar->maincode));
  memset(&rar->offsetcode, 0, sizeof(rar->offsetcode));
  memset(&rar->lowoffsetcode, 0, sizeof(rar->lowoffsetcode));
  memset(&rar->lengthcode, 0, sizeof(rar->lengthcode));
}

static int
read_next_symbol(struct archive_read *a, struct huffman_code *code)
{
  unsigned char bit;
  unsigned int bits;
  int length, value, node;
  struct rar *rar;
  struct rar_br *br;

  if (!code->table)
  {
    if (make_table(a, code) != (ARCHIVE_OK))
      return -1;
  }

  rar = (struct rar *)(a->format->data);
  br = &(rar->br);

  /* Look ahead (peek) at bits */
  if (!rar_br_read_ahead(a, br, code->tablesize)) {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Truncated RAR file data");
    rar->valid = 0;
    return -1;
  }
  bits = rar_br_bits(br, code->tablesize);

  length = code->table[bits].length;
  value = code->table[bits].value;

  if (length < 0)
  {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Invalid prefix code in bitstream");
    return -1;
  }

  if (length <= code->tablesize)
  {
    /* Skip length bits */
    rar_br_consume(br, length);
    return value;
  }

  /* Skip tablesize bits */
  rar_br_consume(br, code->tablesize);

  node = value;
  while (!(code->tree[node].branches[0] ==
    code->tree[node].branches[1]))
  {
    if (!rar_br_read_ahead(a, br, 1)) {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                        "Truncated RAR file data");
      rar->valid = 0;
      return -1;
    }
    bit = rar_br_bits(br, 1);
    rar_br_consume(br, 1);

    if (code->tree[node].branches[bit] < 0)
    {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                        "Invalid prefix code in bitstream");
      return -1;
    }
    node = code->tree[node].branches[bit];
  }

  return code->tree[node].branches[0];
}

static int
create_code(struct archive_read *a, struct huffman_code *code,
            unsigned char *lengths, int numsymbols, char maxlength)
{
  int i, j, codebits = 0, symbolsleft = numsymbols;

  code->numentries = 0;
  code->numallocatedentries = 0;
  if (new_node(code) < 0) {
    archive_set_error(&a->archive, ENOMEM,
                      "Unable to allocate memory for node data.");
    return (ARCHIVE_FATAL);
  }
  code->numentries = 1;
  code->minlength = INT_MAX;
  code->maxlength = INT_MIN;
  codebits = 0;
  for(i = 1; i <= maxlength; i++)
  {
    for(j = 0; j < numsymbols; j++)
    {
      if (lengths[j] != i) continue;
      if (add_value(a, code, j, codebits, i) != ARCHIVE_OK)
        return (ARCHIVE_FATAL);
      codebits++;
      if (--symbolsleft <= 0)
        break; /* No more symbols to encode */
    }
    codebits <<= 1;
  }
  return (ARCHIVE_OK);
}

static int
add_value(struct archive_read *a, struct huffman_code *code, int value,
          int codebits, int length)
{
  int repeatpos, lastnode, bitpos, bit, repeatnode, nextnode;

  free(code->table);
  code->table = NULL;

  if(length > code->maxlength)
    code->maxlength = length;
  if(length < code->minlength)
    code->minlength = length;

  repeatpos = -1;
  if (repeatpos == 0 || (repeatpos >= 0
    && (((codebits >> (repeatpos - 1)) & 3) == 0
    || ((codebits >> (repeatpos - 1)) & 3) == 3)))
  {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Invalid repeat position");
    return (ARCHIVE_FATAL);
  }

  lastnode = 0;
  for (bitpos = length - 1; bitpos >= 0; bitpos--)
  {
    bit = (codebits >> bitpos) & 1;

    /* Leaf node check */
    if (code->tree[lastnode].branches[0] ==
      code->tree[lastnode].branches[1])
    {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                        "Prefix found");
      return (ARCHIVE_FATAL);
    }

    if (bitpos == repeatpos)
    {
      /* Open branch check */
      if (!(code->tree[lastnode].branches[bit] < 0))
      {
        archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                          "Invalid repeating code");
        return (ARCHIVE_FATAL);
      }

      if ((repeatnode = new_node(code)) < 0) {
        archive_set_error(&a->archive, ENOMEM,
                          "Unable to allocate memory for node data.");
        return (ARCHIVE_FATAL);
      }
      if ((nextnode = new_node(code)) < 0) {
        archive_set_error(&a->archive, ENOMEM,
                          "Unable to allocate memory for node data.");
        return (ARCHIVE_FATAL);
      }

      /* Set branches */
      code->tree[lastnode].branches[bit] = repeatnode;
      code->tree[repeatnode].branches[bit] = repeatnode;
      code->tree[repeatnode].branches[bit^1] = nextnode;
      lastnode = nextnode;

      bitpos++; /* terminating bit already handled, skip it */
    }
    else
    {
      /* Open branch check */
      if (code->tree[lastnode].branches[bit] < 0)
      {
        if (new_node(code) < 0) {
          archive_set_error(&a->archive, ENOMEM,
                            "Unable to allocate memory for node data.");
          return (ARCHIVE_FATAL);
        }
        code->tree[lastnode].branches[bit] = code->numentries++;
      }

      /* set to branch */
      lastnode = code->tree[lastnode].branches[bit];
    }
  }

  if (!(code->tree[lastnode].branches[0] == -1 &&
    code->tree[lastnode].branches[1] == -2))
  {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Prefix found");
    return (ARCHIVE_FATAL);
  }

  /* Set leaf value */
  code->tree[lastnode].branches[0] = value;
  code->tree[lastnode].branches[1] = value;
  return (ARCHIVE_OK);
}

static int
new_node(struct huffman_code *code)
{
  void *new_tree;

  if (code->numallocatedentries == code->numentries) {
    int new_num_entries = 256;
    if (code->numentries > 0) {
      new_num_entries = code->numentries * 2;
    }
    new_tree = realloc(code->tree, new_num_entries * sizeof(*code->tree));
    if (new_tree == NULL)
      return (-1);
    code->tree = (struct huffman_tree_node *)new_tree;
    code->numallocatedentries = new_num_entries;
  }
  code->tree[code->numentries].branches[0] = -1;
  code->tree[code->numentries].branches[1] = -2;
  return 1;
}

static int
make_table(struct archive_read *a, struct huffman_code *code)
{
  if (code->maxlength < code->minlength || code->maxlength > 10)
    code->tablesize = 10;
  else
    code->tablesize = code->maxlength;

  code->table = (struct huffman_table_entry *)calloc(1,
                sizeof(*code->table) * ((size_t)1 << code->tablesize));
  if (code->table == NULL)
  {
    archive_set_error(&a->archive, ENOMEM, "Out of memory");
    return (ARCHIVE_FATAL);
  }

  return make_table_recurse(a, code, 0, code->table, 0, code->tablesize);
}

static int
make_table_recurse(struct archive_read *a, struct huffman_code *code, int node,
                   struct huffman_table_entry *table, int depth,
                   int maxdepth)
{
  int currtablesize, i, ret = (ARCHIVE_OK);

  if (!code->tree)
  {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Huffman tree was not created.");
    return (ARCHIVE_FATAL);
  }
  if (node < 0 || node >= code->numentries)
  {
    archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                      "Invalid location to Huffman tree specified.");
    return (ARCHIVE_FATAL);
  }
location to Huffman tree specified."); return (ARCHIVE_FATAL); } currtablesize = 1 << (maxdepth - depth); if (code->tree[node].branches[0] == code->tree[node].branches[1]) { for(i = 0; i < currtablesize; i++) { table[i].length = depth; table[i].value = code->tree[node].branches[0]; } } else if (node < 0) { for(i = 0; i < currtablesize; i++) table[i].length = -1; } else { if(depth == maxdepth) { table[0].length = maxdepth + 1; table[0].value = node; } else { ret |= make_table_recurse(a, code, code->tree[node].branches[0], table, depth + 1, maxdepth); ret |= make_table_recurse(a, code, code->tree[node].branches[1], table + currtablesize / 2, depth + 1, maxdepth); } } return ret; } static int64_t expand(struct archive_read *a, int64_t end) { static const unsigned char lengthbases[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224 }; static const unsigned char lengthbits[] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5 }; static const unsigned int offsetbases[] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, 131072, 196608, 262144, 327680, 393216, 458752, 524288, 589824, 655360, 720896, 786432, 851968, 917504, 983040, 1048576, 1310720, 1572864, 1835008, 2097152, 2359296, 2621440, 2883584, 3145728, 3407872, 3670016, 3932160 }; static const unsigned char offsetbits[] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18 }; static const unsigned char shortbases[] = { 0, 4, 8, 16, 32, 64, 128, 192 }; static const unsigned char shortbits[] = { 2, 2, 3, 4, 5, 6, 6, 6 }; int symbol, offs, len, offsindex, lensymbol, i, offssymbol, lowoffsetsymbol; unsigned char newfile; struct rar *rar = (struct rar *)(a->format->data); struct rar_br *br = &(rar->br); if (rar->filterstart < end) end = rar->filterstart; while (1) { if (rar->output_last_match && lzss_position(&rar->lzss) + rar->lastlength <= end) { lzss_emit_match(rar, rar->lastoffset, rar->lastlength); rar->output_last_match = 0; } if(rar->is_ppmd_block || rar->output_last_match || lzss_position(&rar->lzss) >= end) return lzss_position(&rar->lzss); if ((symbol = read_next_symbol(a, &rar->maincode)) < 0) return (ARCHIVE_FATAL); rar->output_last_match = 0; if (symbol < 256) { lzss_emit_literal(rar, symbol); continue; } else if (symbol == 256) { if (!rar_br_read_ahead(a, br, 1)) goto truncated_data; newfile = !rar_br_bits(br, 1); rar_br_consume(br, 1); if(newfile) { rar->start_new_block = 1; if (!rar_br_read_ahead(a, br, 1)) goto truncated_data; rar->start_new_table = rar_br_bits(br, 1); rar_br_consume(br, 1); return lzss_position(&rar->lzss); } else { if (parse_codes(a) != ARCHIVE_OK) return (ARCHIVE_FATAL); continue; } } else if(symbol==257) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Parsing filters is unsupported."); return (ARCHIVE_FAILED); } else if(symbol==258) { if(rar->lastlength == 0) continue; offs = rar->lastoffset; len = rar->lastlength; } else if (symbol <= 262) { offsindex = symbol - 259; offs = rar->oldoffset[offsindex]; if ((lensymbol = read_next_symbol(a, &rar->lengthcode)) < 0) goto bad_data; if (lensymbol > (int)(sizeof(lengthbases)/sizeof(lengthbases[0]))) goto bad_data; if (lensymbol > (int)(sizeof(lengthbits)/sizeof(lengthbits[0]))) 
goto bad_data; len = lengthbases[lensymbol] + 2; if (lengthbits[lensymbol] > 0) { if (!rar_br_read_ahead(a, br, lengthbits[lensymbol])) goto truncated_data; len += rar_br_bits(br, lengthbits[lensymbol]); rar_br_consume(br, lengthbits[lensymbol]); } for (i = offsindex; i > 0; i--) rar->oldoffset[i] = rar->oldoffset[i-1]; rar->oldoffset[0] = offs; } else if(symbol<=270) { offs = shortbases[symbol-263] + 1; if(shortbits[symbol-263] > 0) { if (!rar_br_read_ahead(a, br, shortbits[symbol-263])) goto truncated_data; offs += rar_br_bits(br, shortbits[symbol-263]); rar_br_consume(br, shortbits[symbol-263]); } len = 2; for(i = 3; i > 0; i--) rar->oldoffset[i] = rar->oldoffset[i-1]; rar->oldoffset[0] = offs; } else { if (symbol-271 > (int)(sizeof(lengthbases)/sizeof(lengthbases[0]))) goto bad_data; if (symbol-271 > (int)(sizeof(lengthbits)/sizeof(lengthbits[0]))) goto bad_data; len = lengthbases[symbol-271]+3; if(lengthbits[symbol-271] > 0) { if (!rar_br_read_ahead(a, br, lengthbits[symbol-271])) goto truncated_data; len += rar_br_bits(br, lengthbits[symbol-271]); rar_br_consume(br, lengthbits[symbol-271]); } if ((offssymbol = read_next_symbol(a, &rar->offsetcode)) < 0) goto bad_data; if (offssymbol > (int)(sizeof(offsetbases)/sizeof(offsetbases[0]))) goto bad_data; if (offssymbol > (int)(sizeof(offsetbits)/sizeof(offsetbits[0]))) goto bad_data; offs = offsetbases[offssymbol]+1; if(offsetbits[offssymbol] > 0) { if(offssymbol > 9) { if(offsetbits[offssymbol] > 4) { if (!rar_br_read_ahead(a, br, offsetbits[offssymbol] - 4)) goto truncated_data; offs += rar_br_bits(br, offsetbits[offssymbol] - 4) << 4; rar_br_consume(br, offsetbits[offssymbol] - 4); } if(rar->numlowoffsetrepeats > 0) { rar->numlowoffsetrepeats--; offs += rar->lastlowoffset; } else { if ((lowoffsetsymbol = read_next_symbol(a, &rar->lowoffsetcode)) < 0) return (ARCHIVE_FATAL); if(lowoffsetsymbol == 16) { rar->numlowoffsetrepeats = 15; offs += rar->lastlowoffset; } else { offs += lowoffsetsymbol; rar->lastlowoffset = lowoffsetsymbol; } } } else { if (!rar_br_read_ahead(a, br, offsetbits[offssymbol])) goto truncated_data; offs += rar_br_bits(br, offsetbits[offssymbol]); rar_br_consume(br, offsetbits[offssymbol]); } } if (offs >= 0x40000) len++; if (offs >= 0x2000) len++; for(i = 3; i > 0; i--) rar->oldoffset[i] = rar->oldoffset[i-1]; rar->oldoffset[0] = offs; } rar->lastoffset = offs; rar->lastlength = len; rar->output_last_match = 1; } truncated_data: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated RAR file data"); rar->valid = 0; return (ARCHIVE_FATAL); bad_data: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Bad RAR file data"); return (ARCHIVE_FATAL); } static int copy_from_lzss_window(struct archive_read *a, const void **buffer, int64_t startpos, int length) { int windowoffs, firstpart; struct rar *rar = (struct rar *)(a->format->data); if (!rar->unp_buffer) { if ((rar->unp_buffer = malloc(rar->unp_buffer_size)) == NULL) { archive_set_error(&a->archive, ENOMEM, "Unable to allocate memory for uncompressed data."); return (ARCHIVE_FATAL); } } windowoffs = lzss_offset_for_position(&rar->lzss, startpos); if(windowoffs + length <= lzss_size(&rar->lzss)) memcpy(&rar->unp_buffer[rar->unp_offset], &rar->lzss.window[windowoffs], length); else { firstpart = lzss_size(&rar->lzss) - windowoffs; if (firstpart < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Bad RAR file data"); return (ARCHIVE_FATAL); } if (firstpart < length) { memcpy(&rar->unp_buffer[rar->unp_offset], 
&rar->lzss.window[windowoffs], firstpart); memcpy(&rar->unp_buffer[rar->unp_offset + firstpart], &rar->lzss.window[0], length - firstpart); } else memcpy(&rar->unp_buffer[rar->unp_offset], &rar->lzss.window[windowoffs], length); } rar->unp_offset += length; if (rar->unp_offset >= rar->unp_buffer_size) *buffer = rar->unp_buffer; else *buffer = NULL; return (ARCHIVE_OK); } static const void * rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) { struct rar *rar = (struct rar *)(a->format->data); const void *h = __archive_read_ahead(a, min, avail); int ret; if (avail) { if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested) *avail = a->archive.read_data_requested; if (*avail > rar->bytes_remaining) *avail = (ssize_t)rar->bytes_remaining; if (*avail < 0) return NULL; else if (*avail == 0 && rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER) { ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) { rar->has_endarc_header = 1; ret = archive_read_format_rar_read_header(a, a->entry); } if (ret != (ARCHIVE_OK)) return NULL; return rar_read_ahead(a, min, avail); } } return h; }
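/*
 * Editor's illustrative sketch (not part of the original file): the
 * read_next_symbol() routine above is a two-level Huffman decoder --
 * a flat lookup table resolves any code of up to `tablesize` bits in
 * a single indexed read, and only longer codes fall back to a
 * bit-by-bit walk of the tree.  The standalone toy below shows the
 * table half of that idea on a hand-built 3-symbol code (A=0, B=10,
 * C=11).  All names here are hypothetical and exist only for this
 * sketch; it does not reuse any libarchive internals.
 */
#if 0
#include <stdio.h>

struct toy_entry { int length; int value; };

/* Decode one symbol from an MSB-first bit window using a table sized
 * for codes of up to `tablesize` bits; the caller would then consume
 * table[idx].length bits from its bit reader. */
static int toy_decode(const struct toy_entry *table, int tablesize,
                      unsigned bits)
{
    unsigned idx = bits & ((1u << tablesize) - 1);
    return table[idx].value;
}

int main(void)
{
    /* Code: A=0, B=10, C=11; tablesize = 2, so 4 entries.  The 1-bit
     * code A fills every index sharing its prefix (00 and 01). */
    struct toy_entry table[4] = {
        { 1, 'A' }, { 1, 'A' },   /* 00, 01 -> A */
        { 2, 'B' },               /* 10 -> B */
        { 2, 'C' },               /* 11 -> C */
    };
    printf("%c %c %c\n",
           toy_decode(table, 2, 0x1u),   /* 01 -> A */
           toy_decode(table, 2, 0x2u),   /* 10 -> B */
           toy_decode(table, 2, 0x3u));  /* 11 -> C */
    return 0;
}
#endif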
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5035_0
crossvul-cpp_data_good_343_5
/* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Initially written by David Mattes <david.mattes@boeing.com> */ /* Support for multiple key containers by Lukas Wunner <lukas@wunner.de> */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "internal.h" #include "pkcs15.h" #define MANU_ID "Gemplus" #define APPLET_NAME "GemSAFE V1" #define DRIVER_SERIAL_NUMBER "v0.9" #define GEMSAFE_APP_PATH "3F001600" #define GEMSAFE_PATH "3F0016000004" /* Apparently, the Applet max read "quanta" is 248 bytes * Gemalto ClassicClient reads files in chunks of 238 bytes */ #define GEMSAFE_READ_QUANTUM 248 #define GEMSAFE_MAX_OBJLEN 28672 int sc_pkcs15emu_gemsafeV1_init_ex(sc_pkcs15_card_t *, struct sc_aid *,sc_pkcs15emu_opt_t *); static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags); static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags); static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags); typedef struct cdata_st { char *label; int authority; const char *path; size_t index; size_t count; const char *id; int obj_flags; } cdata; const unsigned int gemsafe_cert_max = 12; cdata gemsafe_cert[] = { {"DS certificate #1", 0, GEMSAFE_PATH, 0, 0, "45", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #2", 0, GEMSAFE_PATH, 0, 0, "46", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #3", 0, GEMSAFE_PATH, 0, 0, "47", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #4", 0, GEMSAFE_PATH, 0, 0, "48", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #5", 0, GEMSAFE_PATH, 0, 0, "49", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #6", 0, GEMSAFE_PATH, 0, 0, "50", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #7", 0, GEMSAFE_PATH, 0, 0, "51", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #8", 0, GEMSAFE_PATH, 0, 0, "52", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #9", 0, GEMSAFE_PATH, 0, 0, "53", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #10", 0, GEMSAFE_PATH, 0, 0, "54", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #11", 0, GEMSAFE_PATH, 0, 0, "55", SC_PKCS15_CO_FLAG_MODIFIABLE}, {"DS certificate #12", 0, GEMSAFE_PATH, 0, 0, "56", SC_PKCS15_CO_FLAG_MODIFIABLE}, }; typedef struct pdata_st { const u8 atr[SC_MAX_ATR_SIZE]; const size_t atr_len; const char *id; const char *label; const char *path; const int ref; const int type; const unsigned int maxlen; const unsigned int minlen; const int flags; 
const int tries_left; const char pad_char; const int obj_flags; } pindata; const unsigned int gemsafe_pin_max = 2; const pindata gemsafe_pin[] = { /* ATR-specific PIN policies, first match found is used: */ { {0x3B, 0x7D, 0x96, 0x00, 0x00, 0x80, 0x31, 0x80, 0x65, 0xB0, 0x83, 0x11, 0x48, 0xC8, 0x83, 0x00, 0x90, 0x00}, 18, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_ASCII_NUMERIC, 8, 4, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0x00, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE }, /* default PIN policy comes last: */ { { 0 }, 0, "01", "DS pin", GEMSAFE_PATH, 0x01, SC_PKCS15_PIN_TYPE_BCD, 16, 6, SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL, 3, 0xFF, SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE } }; typedef struct prdata_st { const char *id; char *label; unsigned int modulus_len; int usage; const char *path; int ref; const char *auth_id; int obj_flags; } prdata; #define USAGE_NONREP SC_PKCS15_PRKEY_USAGE_NONREPUDIATION #define USAGE_KE SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP #define USAGE_AUT SC_PKCS15_PRKEY_USAGE_ENCRYPT | \ SC_PKCS15_PRKEY_USAGE_DECRYPT | \ SC_PKCS15_PRKEY_USAGE_WRAP | \ SC_PKCS15_PRKEY_USAGE_UNWRAP | \ SC_PKCS15_PRKEY_USAGE_SIGN prdata gemsafe_prkeys[] = { { "45", "DS key #1", 1024, USAGE_AUT, GEMSAFE_PATH, 0x03, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "46", "DS key #2", 1024, USAGE_AUT, GEMSAFE_PATH, 0x04, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "47", "DS key #3", 1024, USAGE_AUT, GEMSAFE_PATH, 0x05, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "48", "DS key #4", 1024, USAGE_AUT, GEMSAFE_PATH, 0x06, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "49", "DS key #5", 1024, USAGE_AUT, GEMSAFE_PATH, 0x07, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "50", "DS key #6", 1024, USAGE_AUT, GEMSAFE_PATH, 0x08, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "51", "DS key #7", 1024, USAGE_AUT, GEMSAFE_PATH, 0x09, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "52", "DS key #8", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0a, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "53", "DS key #9", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0b, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "54", "DS key #10", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0c, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "55", "DS key #11", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0d, "01", SC_PKCS15_CO_FLAG_PRIVATE}, { "56", "DS key #12", 1024, USAGE_AUT, GEMSAFE_PATH, 0x0e, "01", SC_PKCS15_CO_FLAG_PRIVATE}, }; static int gemsafe_get_cert_len(sc_card_t *card) { int r; u8 ibuf[GEMSAFE_MAX_OBJLEN]; u8 *iptr; struct sc_path path; struct sc_file *file; size_t objlen, certlen; unsigned int ind, i=0; sc_format_path(GEMSAFE_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* Initial read */ r = sc_read_binary(card, 0, ibuf, GEMSAFE_READ_QUANTUM, 0); if (r < 0) return SC_ERROR_INTERNAL; /* Actual stored object size is encoded in first 2 bytes * (allocated EF space is much greater!) */ objlen = (((size_t) ibuf[0]) << 8) | ibuf[1]; sc_log(card->ctx, "Stored object is of size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); if (objlen < 1 || objlen > GEMSAFE_MAX_OBJLEN) { sc_log(card->ctx, "Invalid object size: %"SC_FORMAT_LEN_SIZE_T"u", objlen); return SC_ERROR_INTERNAL; } /* It looks like the first thing in the block is a table of * which keys are allocated. The table is small and is in the * first 248 bytes. 
Example for a card with 10 key containers: * 01 f0 00 03 03 b0 00 03 <= 1st key unallocated * 01 f0 00 04 03 b0 00 04 <= 2nd key unallocated * 01 fe 14 00 05 03 b0 00 05 <= 3rd key allocated * 01 fe 14 01 06 03 b0 00 06 <= 4th key allocated * 01 f0 00 07 03 b0 00 07 <= 5th key unallocated * ... * 01 f0 00 0c 03 b0 00 0c <= 10th key unallocated * For allocated keys, the fourth byte seems to indicate the * default key and the fifth byte indicates the key_ref of * the private key. */ ind = 2; /* skip length */ while (ibuf[ind] == 0x01 && i < gemsafe_cert_max) { if (ibuf[ind+1] == 0xFE) { gemsafe_prkeys[i].ref = ibuf[ind+4]; sc_log(card->ctx, "Key container %d is allocated and uses key_ref %d", i+1, gemsafe_prkeys[i].ref); ind += 9; } else { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; sc_log(card->ctx, "Key container %d is unallocated", i+1); ind += 8; } i++; } /* Delete additional key containers from the data structures if * this card can't accommodate them. */ for (; i < gemsafe_cert_max; i++) { gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } /* Read entire file, then dissect in memory. * Gemalto ClassicClient seems to do it the same way. */ iptr = ibuf + GEMSAFE_READ_QUANTUM; while ((size_t)(iptr - ibuf) < objlen) { r = sc_read_binary(card, iptr - ibuf, iptr, MIN(GEMSAFE_READ_QUANTUM, objlen - (iptr - ibuf)), 0); if (r < 0) { sc_log(card->ctx, "Could not read cert object"); return SC_ERROR_INTERNAL; } iptr += GEMSAFE_READ_QUANTUM; } /* Search buffer for certificates, they start with 0x3082. */ i = 0; while (ind < objlen - 1) { if (ibuf[ind] == 0x30 && ibuf[ind+1] == 0x82) { /* Find next allocated key container */ while (i < gemsafe_cert_max && gemsafe_cert[i].label == NULL) i++; if (i == gemsafe_cert_max) { sc_log(card->ctx, "Warning: Found orphaned certificate at offset %d", ind); return SC_SUCCESS; } /* DER cert len is encoded this way */ if (ind+3 >= sizeof ibuf) return SC_ERROR_INVALID_DATA; certlen = ((((size_t) ibuf[ind+2]) << 8) | ibuf[ind+3]) + 4; sc_log(card->ctx, "Found certificate of key container %d at offset %d, len %"SC_FORMAT_LEN_SIZE_T"u", i+1, ind, certlen); gemsafe_cert[i].index = ind; gemsafe_cert[i].count = certlen; ind += certlen; i++; } else ind++; } /* Delete additional key containers from the data structures if * they're missing on the card. 
*/ for (; i < gemsafe_cert_max; i++) { if (gemsafe_cert[i].label) { sc_log(card->ctx, "Warning: Certificate of key container %d is missing", i+1); gemsafe_prkeys[i].label = NULL; gemsafe_cert[i].label = NULL; } } return SC_SUCCESS; } static int gemsafe_detect_card( sc_pkcs15_card_t *p15card) { if (strcmp(p15card->card->name, "GemSAFE V1")) return SC_ERROR_WRONG_CARD; return SC_SUCCESS; } static int sc_pkcs15emu_gemsafeV1_init( sc_pkcs15_card_t *p15card) { int r; unsigned int i; struct sc_path path; struct sc_file *file = NULL; struct sc_card *card = p15card->card; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_log(p15card->card->ctx, "Setting pkcs15 parameters"); if (p15card->tokeninfo->label) free(p15card->tokeninfo->label); p15card->tokeninfo->label = malloc(strlen(APPLET_NAME) + 1); if (!p15card->tokeninfo->label) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->label, APPLET_NAME); if (p15card->tokeninfo->serial_number) free(p15card->tokeninfo->serial_number); p15card->tokeninfo->serial_number = malloc(strlen(DRIVER_SERIAL_NUMBER) + 1); if (!p15card->tokeninfo->serial_number) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->serial_number, DRIVER_SERIAL_NUMBER); /* the GemSAFE applet version number */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0xdf, 0x03); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); /* Manual says Le=0x05, but should be 0x08 to return full version number */ apdu.le = 0x08; apdu.lc = 0; apdu.datalen = 0; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 != 0x90 || apdu.sw2 != 0x00) return SC_ERROR_INTERNAL; if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* the manufacturer ID, in this case GemPlus */ if (p15card->tokeninfo->manufacturer_id) free(p15card->tokeninfo->manufacturer_id); p15card->tokeninfo->manufacturer_id = malloc(strlen(MANU_ID) + 1); if (!p15card->tokeninfo->manufacturer_id) return SC_ERROR_INTERNAL; strcpy(p15card->tokeninfo->manufacturer_id, MANU_ID); /* determine allocated key containers and length of certificates */ r = gemsafe_get_cert_len(card); if (r != SC_SUCCESS) return SC_ERROR_INTERNAL; /* set certs */ sc_log(p15card->card->ctx, "Setting certificates"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; if (gemsafe_cert[i].label == NULL) continue; sc_format_path(gemsafe_cert[i].path, &path); sc_pkcs15_format_id(gemsafe_cert[i].id, &p15Id); path.index = gemsafe_cert[i].index; path.count = gemsafe_cert[i].count; sc_pkcs15emu_add_cert(p15card, SC_PKCS15_TYPE_CERT_X509, gemsafe_cert[i].authority, &path, &p15Id, gemsafe_cert[i].label, gemsafe_cert[i].obj_flags); } /* set gemsafe_pin */ sc_log(p15card->card->ctx, "Setting PIN"); for (i=0; i < gemsafe_pin_max; i++) { struct sc_pkcs15_id p15Id; struct sc_path path; sc_pkcs15_format_id(gemsafe_pin[i].id, &p15Id); sc_format_path(gemsafe_pin[i].path, &path); if (gemsafe_pin[i].atr_len == 0 || (gemsafe_pin[i].atr_len == p15card->card->atr.len && memcmp(p15card->card->atr.value, gemsafe_pin[i].atr, p15card->card->atr.len) == 0)) { sc_pkcs15emu_add_pin(p15card, &p15Id, gemsafe_pin[i].label, &path, gemsafe_pin[i].ref, gemsafe_pin[i].type, gemsafe_pin[i].minlen, gemsafe_pin[i].maxlen, gemsafe_pin[i].flags, gemsafe_pin[i].tries_left, gemsafe_pin[i].pad_char, gemsafe_pin[i].obj_flags); break; } }; /* set private keys */ sc_log(p15card->card->ctx, "Setting private keys"); for (i = 0; i < gemsafe_cert_max; i++) { struct sc_pkcs15_id p15Id, authId, *pauthId; struct sc_path path; 
int key_ref = 0x03; if (gemsafe_prkeys[i].label == NULL) continue; sc_pkcs15_format_id(gemsafe_prkeys[i].id, &p15Id); if (gemsafe_prkeys[i].auth_id) { sc_pkcs15_format_id(gemsafe_prkeys[i].auth_id, &authId); pauthId = &authId; } else pauthId = NULL; sc_format_path(gemsafe_prkeys[i].path, &path); /* * The key ref may be different for different sites; * by adding flags=n where the low order 4 bits can be * the key ref we can force it. */ if ( p15card->card->flags & 0x0F) { key_ref = p15card->card->flags & 0x0F; sc_debug(p15card->card->ctx, SC_LOG_DEBUG_NORMAL, "Overriding key_ref %d with %d\n", gemsafe_prkeys[i].ref, key_ref); } else key_ref = gemsafe_prkeys[i].ref; sc_pkcs15emu_add_prkey(p15card, &p15Id, gemsafe_prkeys[i].label, SC_PKCS15_TYPE_PRKEY_RSA, gemsafe_prkeys[i].modulus_len, gemsafe_prkeys[i].usage, &path, key_ref, pauthId, gemsafe_prkeys[i].obj_flags); } /* select the application DF */ sc_log(p15card->card->ctx, "Selecting application DF"); sc_format_path(GEMSAFE_APP_PATH, &path); r = sc_select_file(card, &path, &file); if (r != SC_SUCCESS || !file) return SC_ERROR_INTERNAL; /* set the application DF */ if (p15card->file_app) free(p15card->file_app); p15card->file_app = file; return SC_SUCCESS; } int sc_pkcs15emu_gemsafeV1_init_ex( sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_gemsafeV1_init(p15card); else { int r = gemsafe_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_gemsafeV1_init(p15card); } } static sc_pkcs15_df_t * sc_pkcs15emu_get_df(sc_pkcs15_card_t *p15card, unsigned int type) { sc_pkcs15_df_t *df; sc_file_t *file; int created = 0; while (1) { for (df = p15card->df_list; df; df = df->next) { if (df->type == type) { if (created) df->enumerated = 1; return df; } } assert(created == 0); file = sc_file_new(); if (!file) return NULL; sc_format_path("11001101", &file->path); sc_pkcs15_add_df(p15card, type, &file->path); sc_file_free(file); created++; } } static int sc_pkcs15emu_add_object(sc_pkcs15_card_t *p15card, int type, const char *label, void *data, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_object_t *obj; int df_type; obj = calloc(1, sizeof(*obj)); obj->type = type; obj->data = data; if (label) strncpy(obj->label, label, sizeof(obj->label)-1); obj->flags = obj_flags; if (auth_id) obj->auth_id = *auth_id; switch (type & SC_PKCS15_TYPE_CLASS_MASK) { case SC_PKCS15_TYPE_AUTH: df_type = SC_PKCS15_AODF; break; case SC_PKCS15_TYPE_PRKEY: df_type = SC_PKCS15_PRKDF; break; case SC_PKCS15_TYPE_PUBKEY: df_type = SC_PKCS15_PUKDF; break; case SC_PKCS15_TYPE_CERT: df_type = SC_PKCS15_CDF; break; default: sc_log(p15card->card->ctx, "Unknown PKCS15 object type %d", type); free(obj); return SC_ERROR_INVALID_ARGUMENTS; } obj->df = sc_pkcs15emu_get_df(p15card, df_type); sc_pkcs15_add_object(p15card, obj); return 0; } static int sc_pkcs15emu_add_pin(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, const sc_path_t *path, int ref, int type, unsigned int min_length, unsigned int max_length, int flags, int tries_left, const char pad_char, int obj_flags) { sc_pkcs15_auth_info_t *info; info = calloc(1, sizeof(*info)); if (!info) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); info->auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; info->auth_method = SC_AC_CHV; info->auth_id = *id; info->attrs.pin.min_length = min_length; info->attrs.pin.max_length = max_length; info->attrs.pin.stored_length = max_length; 
info->attrs.pin.type = type; info->attrs.pin.reference = ref; info->attrs.pin.flags = flags; info->attrs.pin.pad_char = pad_char; info->tries_left = tries_left; info->logged_in = SC_PIN_STATE_UNKNOWN; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, SC_PKCS15_TYPE_AUTH_PIN, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_cert(sc_pkcs15_card_t *p15card, int type, int authority, const sc_path_t *path, const sc_pkcs15_id_t *id, const char *label, int obj_flags) { sc_pkcs15_cert_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->authority = authority; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, NULL, obj_flags); } static int sc_pkcs15emu_add_prkey(sc_pkcs15_card_t *p15card, const sc_pkcs15_id_t *id, const char *label, int type, unsigned int modulus_length, int usage, const sc_path_t *path, int ref, const sc_pkcs15_id_t *auth_id, int obj_flags) { sc_pkcs15_prkey_info_t *info; info = calloc(1, sizeof(*info)); if (!info) { LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_OUT_OF_MEMORY); } info->id = *id; info->modulus_length = modulus_length; info->usage = usage; info->native = 1; info->access_flags = SC_PKCS15_PRKEY_ACCESS_SENSITIVE | SC_PKCS15_PRKEY_ACCESS_ALWAYSSENSITIVE | SC_PKCS15_PRKEY_ACCESS_NEVEREXTRACTABLE | SC_PKCS15_PRKEY_ACCESS_LOCAL; info->key_reference = ref; if (path) info->path = *path; return sc_pkcs15emu_add_object(p15card, type, label, info, auth_id, obj_flags); } /* SC_IMPLEMENT_DRIVER_VERSION("0.9.4") */
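/*
 * Editor's illustrative sketch (not part of the original file): the
 * comment in gemsafe_get_cert_len() describes the on-card container
 * table -- each record starts with 0x01; a second byte of 0xFE marks
 * an allocated container (a 9-byte record with key_ref in byte 4),
 * anything else an unallocated one (an 8-byte record).  A minimal
 * standalone parser for that layout might look like the sketch below;
 * the names are hypothetical and the explicit bounds checks are an
 * addition for the sketch, not a claim about the driver.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

/* Walk the container table in buf (starting after the 2-byte object
 * length), reporting which containers are allocated and their key
 * references. */
static void toy_parse_containers(const unsigned char *buf, size_t len,
                                 unsigned max_containers)
{
    size_t ind = 2;                 /* skip the 2-byte object length */
    unsigned i = 0;

    while (ind + 8 <= len && buf[ind] == 0x01 && i < max_containers) {
        if (buf[ind + 1] == 0xFE && ind + 9 <= len) {
            printf("container %u allocated, key_ref %u\n",
                   i + 1, buf[ind + 4]);
            ind += 9;               /* allocated records are 9 bytes */
        } else {
            printf("container %u unallocated\n", i + 1);
            ind += 8;               /* unallocated records are 8 bytes */
        }
        i++;
    }
}

int main(void)
{
    /* Two records taken from the comment above: one unallocated,
     * one allocated with key_ref 0x05. */
    const unsigned char table[] = {
        0x00, 0x13,                                            /* length */
        0x01, 0xf0, 0x00, 0x03, 0x03, 0xb0, 0x00, 0x03,        /* free */
        0x01, 0xfe, 0x14, 0x00, 0x05, 0x03, 0xb0, 0x00, 0x05,  /* used */
    };
    toy_parse_containers(table, sizeof(table), 12);
    return 0;
}
#endif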
./CrossVul/dataset_final_sorted/CWE-119/c/good_343_5
crossvul-cpp_data_bad_3115_0
/* * HID driver for Corsair devices * * Supported devices: * - Vengeance K90 Keyboard * * Copyright (c) 2015 Clement Vuchener */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/hid.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/leds.h> #include "hid-ids.h" #define CORSAIR_USE_K90_MACRO (1<<0) #define CORSAIR_USE_K90_BACKLIGHT (1<<1) struct k90_led { struct led_classdev cdev; int brightness; struct work_struct work; bool removed; }; struct k90_drvdata { struct k90_led record_led; }; struct corsair_drvdata { unsigned long quirks; struct k90_drvdata *k90; struct k90_led *backlight; }; #define K90_GKEY_COUNT 18 static int corsair_usage_to_gkey(unsigned int usage) { /* G1 (0xd0) to G16 (0xdf) */ if (usage >= 0xd0 && usage <= 0xdf) return usage - 0xd0 + 1; /* G17 (0xe8) to G18 (0xe9) */ if (usage >= 0xe8 && usage <= 0xe9) return usage - 0xe8 + 17; return 0; } static unsigned short corsair_gkey_map[K90_GKEY_COUNT] = { BTN_TRIGGER_HAPPY1, BTN_TRIGGER_HAPPY2, BTN_TRIGGER_HAPPY3, BTN_TRIGGER_HAPPY4, BTN_TRIGGER_HAPPY5, BTN_TRIGGER_HAPPY6, BTN_TRIGGER_HAPPY7, BTN_TRIGGER_HAPPY8, BTN_TRIGGER_HAPPY9, BTN_TRIGGER_HAPPY10, BTN_TRIGGER_HAPPY11, BTN_TRIGGER_HAPPY12, BTN_TRIGGER_HAPPY13, BTN_TRIGGER_HAPPY14, BTN_TRIGGER_HAPPY15, BTN_TRIGGER_HAPPY16, BTN_TRIGGER_HAPPY17, BTN_TRIGGER_HAPPY18, }; module_param_array_named(gkey_codes, corsair_gkey_map, ushort, NULL, S_IRUGO); MODULE_PARM_DESC(gkey_codes, "Key codes for the G-keys"); static unsigned short corsair_record_keycodes[2] = { BTN_TRIGGER_HAPPY19, BTN_TRIGGER_HAPPY20 }; module_param_array_named(recordkey_codes, corsair_record_keycodes, ushort, NULL, S_IRUGO); MODULE_PARM_DESC(recordkey_codes, "Key codes for the MR (start and stop record) button"); static unsigned short corsair_profile_keycodes[3] = { BTN_TRIGGER_HAPPY21, BTN_TRIGGER_HAPPY22, BTN_TRIGGER_HAPPY23 }; module_param_array_named(profilekey_codes, corsair_profile_keycodes, ushort, NULL, S_IRUGO); MODULE_PARM_DESC(profilekey_codes, "Key codes for the profile buttons"); #define CORSAIR_USAGE_SPECIAL_MIN 0xf0 #define CORSAIR_USAGE_SPECIAL_MAX 0xff #define CORSAIR_USAGE_MACRO_RECORD_START 0xf6 #define CORSAIR_USAGE_MACRO_RECORD_STOP 0xf7 #define CORSAIR_USAGE_PROFILE 0xf1 #define CORSAIR_USAGE_M1 0xf1 #define CORSAIR_USAGE_M2 0xf2 #define CORSAIR_USAGE_M3 0xf3 #define CORSAIR_USAGE_PROFILE_MAX 0xf3 #define CORSAIR_USAGE_META_OFF 0xf4 #define CORSAIR_USAGE_META_ON 0xf5 #define CORSAIR_USAGE_LIGHT 0xfa #define CORSAIR_USAGE_LIGHT_OFF 0xfa #define CORSAIR_USAGE_LIGHT_DIM 0xfb #define CORSAIR_USAGE_LIGHT_MEDIUM 0xfc #define CORSAIR_USAGE_LIGHT_BRIGHT 0xfd #define CORSAIR_USAGE_LIGHT_MAX 0xfd /* USB control protocol */ #define K90_REQUEST_BRIGHTNESS 49 #define K90_REQUEST_MACRO_MODE 2 #define K90_REQUEST_STATUS 4 #define K90_REQUEST_GET_MODE 5 #define K90_REQUEST_PROFILE 20 #define K90_MACRO_MODE_SW 0x0030 #define K90_MACRO_MODE_HW 0x0001 #define K90_MACRO_LED_ON 0x0020 #define K90_MACRO_LED_OFF 0x0040 /* * LED class devices */ #define K90_BACKLIGHT_LED_SUFFIX "::backlight" #define K90_RECORD_LED_SUFFIX "::record" static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev) { int ret; struct k90_led *led = container_of(led_cdev, struct k90_led, cdev); struct device *dev = led->cdev.dev->parent; struct usb_interface *usbif = 
to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int brightness; char data[8]; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); return -EIO; } brightness = data[4]; if (brightness < 0 || brightness > 3) { dev_warn(dev, "Read invalid backlight brightness: %02hhx.\n", data[4]); return -EIO; } return brightness; } static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) { struct k90_led *led = container_of(led_cdev, struct k90_led, cdev); return led->brightness; } static void k90_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct k90_led *led = container_of(led_cdev, struct k90_led, cdev); led->brightness = brightness; schedule_work(&led->work); } static void k90_backlight_work(struct work_struct *work) { int ret; struct k90_led *led = container_of(work, struct k90_led, work); struct device *dev; struct usb_interface *usbif; struct usb_device *usbdev; if (led->removed) return; dev = led->cdev.dev->parent; usbif = to_usb_interface(dev->parent); usbdev = interface_to_usbdev(usbif); ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), K90_REQUEST_BRIGHTNESS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, led->brightness, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret != 0) dev_warn(dev, "Failed to set backlight brightness (error: %d).\n", ret); } static void k90_record_led_work(struct work_struct *work) { int ret; struct k90_led *led = container_of(work, struct k90_led, work); struct device *dev; struct usb_interface *usbif; struct usb_device *usbdev; int value; if (led->removed) return; dev = led->cdev.dev->parent; usbif = to_usb_interface(dev->parent); usbdev = interface_to_usbdev(usbif); if (led->brightness > 0) value = K90_MACRO_LED_ON; else value = K90_MACRO_LED_OFF; ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), K90_REQUEST_MACRO_MODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret != 0) dev_warn(dev, "Failed to set record LED state (error: %d).\n", ret); } /* * Keyboard attributes */ static ssize_t k90_show_macro_mode(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); const char *macro_mode; char data[8]; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_GET_MODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", ret); return -EIO; } switch (data[0]) { case K90_MACRO_MODE_HW: macro_mode = "HW"; break; case K90_MACRO_MODE_SW: macro_mode = "SW"; break; default: dev_warn(dev, "K90 in unknown mode: %02hhx.\n", data[0]); return -EIO; } return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); } static ssize_t k90_store_macro_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); __u16 value; if (strncmp(buf, "SW", 2) == 0) value = K90_MACRO_MODE_SW; else if (strncmp(buf, "HW", 2) == 0) value = K90_MACRO_MODE_HW; else return -EINVAL; ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), K90_REQUEST_MACRO_MODE, 
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret != 0) { dev_warn(dev, "Failed to set macro mode.\n"); return ret; } return count; } static ssize_t k90_show_current_profile(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; char data[8]; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); return -EIO; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); return -EIO; } return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); } static ssize_t k90_store_current_profile(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int profile; if (kstrtoint(buf, 10, &profile)) return -EINVAL; if (profile < 1 || profile > 3) return -EINVAL; ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), K90_REQUEST_PROFILE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, profile, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret != 0) { dev_warn(dev, "Failed to change current profile (error %d).\n", ret); return ret; } return count; } static DEVICE_ATTR(macro_mode, 0644, k90_show_macro_mode, k90_store_macro_mode); static DEVICE_ATTR(current_profile, 0644, k90_show_current_profile, k90_store_current_profile); static struct attribute *k90_attrs[] = { &dev_attr_macro_mode.attr, &dev_attr_current_profile.attr, NULL }; static const struct attribute_group k90_attr_group = { .attrs = k90_attrs, }; /* * Driver functions */ static int k90_init_backlight(struct hid_device *dev) { int ret; struct corsair_drvdata *drvdata = hid_get_drvdata(dev); size_t name_sz; char *name; drvdata->backlight = kzalloc(sizeof(struct k90_led), GFP_KERNEL); if (!drvdata->backlight) { ret = -ENOMEM; goto fail_backlight_alloc; } name_sz = strlen(dev_name(&dev->dev)) + sizeof(K90_BACKLIGHT_LED_SUFFIX); name = kzalloc(name_sz, GFP_KERNEL); if (!name) { ret = -ENOMEM; goto fail_name_alloc; } snprintf(name, name_sz, "%s" K90_BACKLIGHT_LED_SUFFIX, dev_name(&dev->dev)); drvdata->backlight->removed = false; drvdata->backlight->cdev.name = name; drvdata->backlight->cdev.max_brightness = 3; drvdata->backlight->cdev.brightness_set = k90_brightness_set; drvdata->backlight->cdev.brightness_get = k90_backlight_get; INIT_WORK(&drvdata->backlight->work, k90_backlight_work); ret = led_classdev_register(&dev->dev, &drvdata->backlight->cdev); if (ret != 0) goto fail_register_cdev; return 0; fail_register_cdev: kfree(drvdata->backlight->cdev.name); fail_name_alloc: kfree(drvdata->backlight); drvdata->backlight = NULL; fail_backlight_alloc: return ret; } static int k90_init_macro_functions(struct hid_device *dev) { int ret; struct corsair_drvdata *drvdata = hid_get_drvdata(dev); struct k90_drvdata *k90; size_t name_sz; char *name; k90 = kzalloc(sizeof(struct k90_drvdata), GFP_KERNEL); if (!k90) { ret = -ENOMEM; goto fail_drvdata; } drvdata->k90 = k90; /* Init LED device for record LED */ name_sz = strlen(dev_name(&dev->dev)) + sizeof(K90_RECORD_LED_SUFFIX); name = kzalloc(name_sz, GFP_KERNEL); if (!name) { ret = -ENOMEM; 
goto fail_record_led_alloc; } snprintf(name, name_sz, "%s" K90_RECORD_LED_SUFFIX, dev_name(&dev->dev)); k90->record_led.removed = false; k90->record_led.cdev.name = name; k90->record_led.cdev.max_brightness = 1; k90->record_led.cdev.brightness_set = k90_brightness_set; k90->record_led.cdev.brightness_get = k90_record_led_get; INIT_WORK(&k90->record_led.work, k90_record_led_work); k90->record_led.brightness = 0; ret = led_classdev_register(&dev->dev, &k90->record_led.cdev); if (ret != 0) goto fail_record_led; /* Init attributes */ ret = sysfs_create_group(&dev->dev.kobj, &k90_attr_group); if (ret != 0) goto fail_sysfs; return 0; fail_sysfs: k90->record_led.removed = true; led_classdev_unregister(&k90->record_led.cdev); cancel_work_sync(&k90->record_led.work); fail_record_led: kfree(k90->record_led.cdev.name); fail_record_led_alloc: kfree(k90); fail_drvdata: drvdata->k90 = NULL; return ret; } static void k90_cleanup_backlight(struct hid_device *dev) { struct corsair_drvdata *drvdata = hid_get_drvdata(dev); if (drvdata->backlight) { drvdata->backlight->removed = true; led_classdev_unregister(&drvdata->backlight->cdev); cancel_work_sync(&drvdata->backlight->work); kfree(drvdata->backlight->cdev.name); kfree(drvdata->backlight); } } static void k90_cleanup_macro_functions(struct hid_device *dev) { struct corsair_drvdata *drvdata = hid_get_drvdata(dev); struct k90_drvdata *k90 = drvdata->k90; if (k90) { sysfs_remove_group(&dev->dev.kobj, &k90_attr_group); k90->record_led.removed = true; led_classdev_unregister(&k90->record_led.cdev); cancel_work_sync(&k90->record_led.work); kfree(k90->record_led.cdev.name); kfree(k90); } } static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) { int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; struct usb_interface *usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); if (drvdata == NULL) return -ENOMEM; drvdata->quirks = quirks; hid_set_drvdata(dev, drvdata); ret = hid_parse(dev); if (ret != 0) { hid_err(dev, "parse failed\n"); return ret; } ret = hid_hw_start(dev, HID_CONNECT_DEFAULT); if (ret != 0) { hid_err(dev, "hw start failed\n"); return ret; } if (usbif->cur_altsetting->desc.bInterfaceNumber == 0) { if (quirks & CORSAIR_USE_K90_MACRO) { ret = k90_init_macro_functions(dev); if (ret != 0) hid_warn(dev, "Failed to initialize K90 macro functions.\n"); } if (quirks & CORSAIR_USE_K90_BACKLIGHT) { ret = k90_init_backlight(dev); if (ret != 0) hid_warn(dev, "Failed to initialize K90 backlight.\n"); } } return 0; } static void corsair_remove(struct hid_device *dev) { k90_cleanup_macro_functions(dev); k90_cleanup_backlight(dev); hid_hw_stop(dev); } static int corsair_event(struct hid_device *dev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct corsair_drvdata *drvdata = hid_get_drvdata(dev); if (!drvdata->k90) return 0; switch (usage->hid & HID_USAGE) { case CORSAIR_USAGE_MACRO_RECORD_START: drvdata->k90->record_led.brightness = 1; break; case CORSAIR_USAGE_MACRO_RECORD_STOP: drvdata->k90->record_led.brightness = 0; break; default: break; } return 0; } static int corsair_input_mapping(struct hid_device *dev, struct hid_input *input, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { int gkey; if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD) return 0; gkey = corsair_usage_to_gkey(usage->hid & HID_USAGE); if (gkey != 0) { hid_map_usage_clear(input, usage, bit, max, EV_KEY, 
corsair_gkey_map[gkey - 1]); return 1; } if ((usage->hid & HID_USAGE) >= CORSAIR_USAGE_SPECIAL_MIN && (usage->hid & HID_USAGE) <= CORSAIR_USAGE_SPECIAL_MAX) { switch (usage->hid & HID_USAGE) { case CORSAIR_USAGE_MACRO_RECORD_START: hid_map_usage_clear(input, usage, bit, max, EV_KEY, corsair_record_keycodes[0]); return 1; case CORSAIR_USAGE_MACRO_RECORD_STOP: hid_map_usage_clear(input, usage, bit, max, EV_KEY, corsair_record_keycodes[1]); return 1; case CORSAIR_USAGE_M1: hid_map_usage_clear(input, usage, bit, max, EV_KEY, corsair_profile_keycodes[0]); return 1; case CORSAIR_USAGE_M2: hid_map_usage_clear(input, usage, bit, max, EV_KEY, corsair_profile_keycodes[1]); return 1; case CORSAIR_USAGE_M3: hid_map_usage_clear(input, usage, bit, max, EV_KEY, corsair_profile_keycodes[2]); return 1; default: return -1; } } return 0; } static const struct hid_device_id corsair_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), .driver_data = CORSAIR_USE_K90_MACRO | CORSAIR_USE_K90_BACKLIGHT }, {} }; MODULE_DEVICE_TABLE(hid, corsair_devices); static struct hid_driver corsair_driver = { .name = "corsair", .id_table = corsair_devices, .probe = corsair_probe, .event = corsair_event, .remove = corsair_remove, .input_mapping = corsair_input_mapping, }; module_hid_driver(corsair_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Clement Vuchener"); MODULE_DESCRIPTION("HID driver for Corsair devices");
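/*
 * Editor's illustrative sketch (not part of the original file):
 * corsair_usage_to_gkey() above maps two disjoint HID usage ranges
 * (0xd0-0xdf and 0xe8-0xe9) onto the contiguous G-key indices 1..18,
 * which corsair_input_mapping() then uses as an offset into
 * corsair_gkey_map.  The userspace check below duplicates that range
 * arithmetic so it can be exercised outside the kernel; it does not
 * call any kernel code.
 */
#if 0
#include <stdio.h>

static int toy_usage_to_gkey(unsigned int usage)
{
    if (usage >= 0xd0 && usage <= 0xdf)   /* G1 (0xd0) .. G16 (0xdf) */
        return usage - 0xd0 + 1;
    if (usage >= 0xe8 && usage <= 0xe9)   /* G17 (0xe8) .. G18 (0xe9) */
        return usage - 0xe8 + 17;
    return 0;                             /* not a G-key usage */
}

int main(void)
{
    printf("0xd0 -> G%d\n", toy_usage_to_gkey(0xd0)); /* G1 */
    printf("0xdf -> G%d\n", toy_usage_to_gkey(0xdf)); /* G16 */
    printf("0xe9 -> G%d\n", toy_usage_to_gkey(0xe9)); /* G18 */
    printf("0xa0 -> %d\n",  toy_usage_to_gkey(0xa0)); /* 0: unmapped */
    return 0;
}
#endif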
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3115_0
crossvul-cpp_data_bad_3540_0
/* * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: * * Marek Lindner * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include <linux/debugfs.h> #include <linux/slab.h> #include "icmp_socket.h" #include "send.h" #include "hash.h" #include "originator.h" #include "hard-interface.h" static struct socket_client *socket_client_hash[256]; static void bat_socket_add_packet(struct socket_client *socket_client, struct icmp_packet_rr *icmp_packet, size_t icmp_len); void bat_socket_init(void) { memset(socket_client_hash, 0, sizeof(socket_client_hash)); } static int bat_socket_open(struct inode *inode, struct file *file) { unsigned int i; struct socket_client *socket_client; nonseekable_open(inode, file); socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); if (!socket_client) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) { if (!socket_client_hash[i]) { socket_client_hash[i] = socket_client; break; } } if (i == ARRAY_SIZE(socket_client_hash)) { pr_err("Error - can't add another packet client: " "maximum number of clients reached\n"); kfree(socket_client); return -EXFULL; } INIT_LIST_HEAD(&socket_client->queue_list); socket_client->queue_len = 0; socket_client->index = i; socket_client->bat_priv = inode->i_private; spin_lock_init(&socket_client->lock); init_waitqueue_head(&socket_client->queue_wait); file->private_data = socket_client; inc_module_count(); return 0; } static int bat_socket_release(struct inode *inode, struct file *file) { struct socket_client *socket_client = file->private_data; struct socket_packet *socket_packet; struct list_head *list_pos, *list_pos_tmp; spin_lock_bh(&socket_client->lock); /* for all packets in the queue ... 
*/ list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { socket_packet = list_entry(list_pos, struct socket_packet, list); list_del(list_pos); kfree(socket_packet); } socket_client_hash[socket_client->index] = NULL; spin_unlock_bh(&socket_client->lock); kfree(socket_client); dec_module_count(); return 0; } static ssize_t bat_socket_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct socket_client *socket_client = file->private_data; struct socket_packet *socket_packet; size_t packet_len; int error; if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0)) return -EAGAIN; if ((!buf) || (count < sizeof(struct icmp_packet))) return -EINVAL; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; error = wait_event_interruptible(socket_client->queue_wait, socket_client->queue_len); if (error) return error; spin_lock_bh(&socket_client->lock); socket_packet = list_first_entry(&socket_client->queue_list, struct socket_packet, list); list_del(&socket_packet->list); socket_client->queue_len--; spin_unlock_bh(&socket_client->lock); error = copy_to_user(buf, &socket_packet->icmp_packet, socket_packet->icmp_len); packet_len = socket_packet->icmp_len; kfree(socket_packet); if (error) return -EFAULT; return packet_len; } static ssize_t bat_socket_write(struct file *file, const char __user *buff, size_t len, loff_t *off) { struct socket_client *socket_client = file->private_data; struct bat_priv *bat_priv = socket_client->bat_priv; struct hard_iface *primary_if = NULL; struct sk_buff *skb; struct icmp_packet_rr *icmp_packet; struct orig_node *orig_node = NULL; struct neigh_node *neigh_node = NULL; size_t packet_len = sizeof(struct icmp_packet); if (len < sizeof(struct icmp_packet)) { bat_dbg(DBG_BATMAN, bat_priv, "Error - can't send packet from char device: " "invalid packet size\n"); return -EINVAL; } primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { len = -EFAULT; goto out; } if (len >= sizeof(struct icmp_packet_rr)) packet_len = sizeof(struct icmp_packet_rr); skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); if (!skb) { len = -ENOMEM; goto out; } skb_reserve(skb, sizeof(struct ethhdr)); icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); if (copy_from_user(icmp_packet, buff, packet_len)) { len = -EFAULT; goto free_skb; } if (icmp_packet->packet_type != BAT_ICMP) { bat_dbg(DBG_BATMAN, bat_priv, "Error - can't send packet from char device: " "got bogus packet type (expected: BAT_ICMP)\n"); len = -EINVAL; goto free_skb; } if (icmp_packet->msg_type != ECHO_REQUEST) { bat_dbg(DBG_BATMAN, bat_priv, "Error - can't send packet from char device: " "got bogus message type (expected: ECHO_REQUEST)\n"); len = -EINVAL; goto free_skb; } icmp_packet->uid = socket_client->index; if (icmp_packet->version != COMPAT_VERSION) { icmp_packet->msg_type = PARAMETER_PROBLEM; icmp_packet->version = COMPAT_VERSION; bat_socket_add_packet(socket_client, icmp_packet, packet_len); goto free_skb; } if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dst_unreach; orig_node = orig_hash_find(bat_priv, icmp_packet->dst); if (!orig_node) goto dst_unreach; neigh_node = orig_node_get_router(orig_node); if (!neigh_node) goto dst_unreach; if (!neigh_node->if_incoming) goto dst_unreach; if (neigh_node->if_incoming->if_status != IF_ACTIVE) goto dst_unreach; memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); if (packet_len == sizeof(struct icmp_packet_rr)) memcpy(icmp_packet->rr, neigh_node->if_incoming->net_dev->dev_addr, 
ETH_ALEN); send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); goto out; dst_unreach: icmp_packet->msg_type = DESTINATION_UNREACHABLE; bat_socket_add_packet(socket_client, icmp_packet, packet_len); free_skb: kfree_skb(skb); out: if (primary_if) hardif_free_ref(primary_if); if (neigh_node) neigh_node_free_ref(neigh_node); if (orig_node) orig_node_free_ref(orig_node); return len; } static unsigned int bat_socket_poll(struct file *file, poll_table *wait) { struct socket_client *socket_client = file->private_data; poll_wait(file, &socket_client->queue_wait, wait); if (socket_client->queue_len > 0) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations fops = { .owner = THIS_MODULE, .open = bat_socket_open, .release = bat_socket_release, .read = bat_socket_read, .write = bat_socket_write, .poll = bat_socket_poll, .llseek = no_llseek, }; int bat_socket_setup(struct bat_priv *bat_priv) { struct dentry *d; if (!bat_priv->debug_dir) goto err; d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, bat_priv->debug_dir, bat_priv, &fops); if (!d) goto err; return 0; err: return 1; } static void bat_socket_add_packet(struct socket_client *socket_client, struct icmp_packet_rr *icmp_packet, size_t icmp_len) { struct socket_packet *socket_packet; socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC); if (!socket_packet) return; INIT_LIST_HEAD(&socket_packet->list); memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len); socket_packet->icmp_len = icmp_len; spin_lock_bh(&socket_client->lock); /* while waiting for the lock the socket_client could have been * deleted */ if (!socket_client_hash[icmp_packet->uid]) { spin_unlock_bh(&socket_client->lock); kfree(socket_packet); return; } list_add_tail(&socket_packet->list, &socket_client->queue_list); socket_client->queue_len++; if (socket_client->queue_len > 100) { socket_packet = list_first_entry(&socket_client->queue_list, struct socket_packet, list); list_del(&socket_packet->list); kfree(socket_packet); socket_client->queue_len--; } spin_unlock_bh(&socket_client->lock); wake_up(&socket_client->queue_wait); } void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, size_t icmp_len) { struct socket_client *hash = socket_client_hash[icmp_packet->uid]; if (hash) bat_socket_add_packet(hash, icmp_packet, icmp_len); }
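/*
 * Editor's illustrative sketch (not part of the original file):
 * bat_socket_add_packet() above maintains a bounded FIFO -- new
 * packets are appended at the tail, and once the queue grows past 100
 * entries the oldest head entry is dropped.  The userspace sketch
 * below shows the same cap-and-drop policy with a plain ring buffer
 * standing in for the kernel list; all names are hypothetical.
 */
#if 0
#include <stdio.h>

#define TOY_QUEUE_CAP 100

static int toy_queue[TOY_QUEUE_CAP];
static int toy_head, toy_len;

/* Append v; if the queue is already at capacity, silently drop the
 * oldest entry first (the same steady state of 100 entries that the
 * kernel code reaches by dropping after insertion). */
static void toy_enqueue(int v)
{
    if (toy_len == TOY_QUEUE_CAP) {          /* drop the head */
        toy_head = (toy_head + 1) % TOY_QUEUE_CAP;
        toy_len--;
    }
    toy_queue[(toy_head + toy_len) % TOY_QUEUE_CAP] = v;
    toy_len++;
}

int main(void)
{
    for (int i = 0; i < 105; i++)
        toy_enqueue(i);
    /* Entries 0..4 were dropped; the oldest surviving entry is 5. */
    printf("len=%d head_value=%d\n", toy_len, toy_queue[toy_head]);
    return 0;
}
#endif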
./CrossVul/dataset_final_sorted/CWE-119/c/bad_3540_0
crossvul-cpp_data_good_5733_2
/* * Copyright (c) 2012 Fredrik Mellbin * Copyright (c) 2013 Clément Bœsch * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Fieldmatching filter, ported from VFM filter (VapourSynth) by Clément. * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone * (tritical), the original author. * * @see http://bengal.missouri.edu/~kes25c/ * @see http://www.vapoursynth.com/about/ */ #include <inttypes.h> #include "libavutil/avassert.h" #include "libavutil/imgutils.h" #include "libavutil/opt.h" #include "libavutil/timestamp.h" #include "avfilter.h" #include "internal.h" #define INPUT_MAIN 0 #define INPUT_CLEANSRC 1 enum fieldmatch_parity { FM_PARITY_AUTO = -1, FM_PARITY_BOTTOM = 0, FM_PARITY_TOP = 1, }; enum matching_mode { MODE_PC, MODE_PC_N, MODE_PC_U, MODE_PC_N_UB, MODE_PCN, MODE_PCN_UB, NB_MODE }; enum comb_matching_mode { COMBMATCH_NONE, COMBMATCH_SC, COMBMATCH_FULL, NB_COMBMATCH }; enum comb_dbg { COMBDBG_NONE, COMBDBG_PCN, COMBDBG_PCNUB, NB_COMBDBG }; typedef struct { const AVClass *class; AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream int got_frame[2]; ///< frame request flag for each input stream int hsub, vsub; ///< chroma subsampling values uint32_t eof; ///< bitmask for end of stream int64_t lastscdiff; int64_t lastn; /* options */ int order; int ppsrc; enum matching_mode mode; int field; int mchroma; int y0, y1; int64_t scthresh; double scthresh_flt; enum comb_matching_mode combmatch; int combdbg; int cthresh; int chroma; int blockx, blocky; int combpel; /* misc buffers */ uint8_t *map_data[4]; int map_linesize[4]; uint8_t *cmask_data[4]; int cmask_linesize[4]; int *c_array; int tpitchy, tpitchuv; uint8_t *tbuffer; } FieldMatchContext; #define OFFSET(x) offsetof(FieldMatchContext, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM static const AVOption fieldmatch_options[] = { { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" }, { "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" }, { "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" }, { "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" }, { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" }, { "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" }, { "pc_n", "2-way match + 3rd match on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX,
FLAGS, "mode" }, { "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" }, { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" }, { "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" }, { "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" }, { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS }, { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" }, { "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" }, { "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" }, { "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" }, { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS }, { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS }, { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" }, { "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" }, { "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" }, { "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" }, { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" }, { "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" }, { "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" }, { "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" }, { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS }, { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64= 0}, 0, 1, FLAGS }, { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS }, { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS }, { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as 
combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS }, { NULL } }; AVFILTER_DEFINE_CLASS(fieldmatch); static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane) { return plane ? FF_CEIL_RSHIFT(f->width, fm->hsub) : f->width; } static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane) { return plane ? FF_CEIL_RSHIFT(f->height, fm->vsub) : f->height; } static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2) { int x, y; const uint8_t *srcp1 = f1->data[0]; const uint8_t *srcp2 = f2->data[0]; const int src1_linesize = f1->linesize[0]; const int src2_linesize = f2->linesize[0]; const int width = f1->width; const int height = f1->height; int64_t acc = 0; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) acc += abs(srcp1[x] - srcp2[x]); srcp1 += src1_linesize; srcp2 += src2_linesize; } return acc; } static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v) { int y; for (y = 0; y < h; y++) { memset(data, v, w); data += linesize; } } static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src) { int x, y, plane, max_v = 0; const int cthresh = fm->cthresh; const int cthresh6 = cthresh * 6; for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) { const uint8_t *srcp = src->data[plane]; const int src_linesize = src->linesize[plane]; const int width = get_width (fm, src, plane); const int height = get_height(fm, src, plane); uint8_t *cmkp = fm->cmask_data[plane]; const int cmk_linesize = fm->cmask_linesize[plane]; if (cthresh < 0) { fill_buf(cmkp, width, height, cmk_linesize, 0xff); continue; } fill_buf(cmkp, width, height, cmk_linesize, 0); /* [1 -3 4 -3 1] vertical filter */ #define FILTER(xm2, xm1, xp1, xp2) \ abs( 4 * srcp[x] \ -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \ + (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6 /* first line */ for (x = 0; x < width; x++) { const int s1 = abs(srcp[x] - srcp[x + src_linesize]); if (s1 > cthresh && FILTER(2, 1, 1, 2)) cmkp[x] = 0xff; } srcp += src_linesize; cmkp += cmk_linesize; /* second line */ for (x = 0; x < width; x++) { const int s1 = abs(srcp[x] - srcp[x - src_linesize]); const int s2 = abs(srcp[x] - srcp[x + src_linesize]); if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2)) cmkp[x] = 0xff; } srcp += src_linesize; cmkp += cmk_linesize; /* all lines minus first two and last two */ for (y = 2; y < height-2; y++) { for (x = 0; x < width; x++) { const int s1 = abs(srcp[x] - srcp[x - src_linesize]); const int s2 = abs(srcp[x] - srcp[x + src_linesize]); if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2)) cmkp[x] = 0xff; } srcp += src_linesize; cmkp += cmk_linesize; } /* before-last line */ for (x = 0; x < width; x++) { const int s1 = abs(srcp[x] - srcp[x - src_linesize]); const int s2 = abs(srcp[x] - srcp[x + src_linesize]); if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2)) cmkp[x] = 0xff; } srcp += src_linesize; cmkp += cmk_linesize; /* last line */ for (x = 0; x < width; x++) { const int s1 = abs(srcp[x] - srcp[x - src_linesize]); if (s1 > cthresh && FILTER(-2, -1, -1, -2)) cmkp[x] = 0xff; } } if (fm->chroma) { uint8_t *cmkp = fm->cmask_data[0]; uint8_t *cmkpU = fm->cmask_data[1]; uint8_t *cmkpV = fm->cmask_data[2]; const int width = FF_CEIL_RSHIFT(src->width, fm->hsub); const int height = FF_CEIL_RSHIFT(src->height, fm->vsub); const int cmk_linesize = fm->cmask_linesize[0] << 1; const int cmk_linesizeUV = fm->cmask_linesize[2]; uint8_t *cmkpp = cmkp - (cmk_linesize>>1); 
uint8_t *cmkpn = cmkp + (cmk_linesize>>1); uint8_t *cmkpnn = cmkp + cmk_linesize; for (y = 1; y < height - 1; y++) { cmkpp += cmk_linesize; cmkp += cmk_linesize; cmkpn += cmk_linesize; cmkpnn += cmk_linesize; cmkpV += cmk_linesizeUV; cmkpU += cmk_linesizeUV; for (x = 1; x < width - 1; x++) { #define HAS_FF_AROUND(p, lz) (p[x-1 - lz] == 0xff || p[x - lz] == 0xff || p[x+1 - lz] == 0xff || \ p[x-1 ] == 0xff || p[x+1 ] == 0xff || \ p[x-1 + lz] == 0xff || p[x + lz] == 0xff || p[x+1 + lz] == 0xff) if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) || (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) { ((uint16_t*)cmkp)[x] = 0xffff; ((uint16_t*)cmkpn)[x] = 0xffff; if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff; else ((uint16_t*)cmkpnn)[x] = 0xffff; } } } } { const int blockx = fm->blockx; const int blocky = fm->blocky; const int xhalf = blockx/2; const int yhalf = blocky/2; const int cmk_linesize = fm->cmask_linesize[0]; const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize; const int width = src->width; const int height = src->height; const int xblocks = ((width+xhalf)/blockx) + 1; const int xblocks4 = xblocks<<2; const int yblocks = ((height+yhalf)/blocky) + 1; int *c_array = fm->c_array; const int arraysize = (xblocks*yblocks)<<2; int heighta = (height/(blocky/2))*(blocky/2); const int widtha = (width /(blockx/2))*(blockx/2); if (heighta == height) heighta = height - yhalf; memset(c_array, 0, arraysize * sizeof(*c_array)); #define C_ARRAY_ADD(v) do { \ const int box1 = (x / blockx) * 4; \ const int box2 = ((x + xhalf) / blockx) * 4; \ c_array[temp1 + box1 ] += v; \ c_array[temp1 + box2 + 1] += v; \ c_array[temp2 + box1 + 2] += v; \ c_array[temp2 + box2 + 3] += v; \ } while (0) #define VERTICAL_HALF(y_start, y_end) do { \ for (y = y_start; y < y_end; y++) { \ const int temp1 = (y / blocky) * xblocks4; \ const int temp2 = ((y + yhalf) / blocky) * xblocks4; \ for (x = 0; x < width; x++) \ if (cmkp[x - cmk_linesize] == 0xff && \ cmkp[x ] == 0xff && \ cmkp[x + cmk_linesize] == 0xff) \ C_ARRAY_ADD(1); \ cmkp += cmk_linesize; \ } \ } while (0) VERTICAL_HALF(1, yhalf); for (y = yhalf; y < heighta; y += yhalf) { const int temp1 = (y / blocky) * xblocks4; const int temp2 = ((y + yhalf) / blocky) * xblocks4; for (x = 0; x < widtha; x += xhalf) { const uint8_t *cmkp_tmp = cmkp + x; int u, v, sum = 0; for (u = 0; u < yhalf; u++) { for (v = 0; v < xhalf; v++) if (cmkp_tmp[v - cmk_linesize] == 0xff && cmkp_tmp[v ] == 0xff && cmkp_tmp[v + cmk_linesize] == 0xff) sum++; cmkp_tmp += cmk_linesize; } if (sum) C_ARRAY_ADD(sum); } for (x = widtha; x < width; x++) { const uint8_t *cmkp_tmp = cmkp + x; int u, sum = 0; for (u = 0; u < yhalf; u++) { if (cmkp_tmp[-cmk_linesize] == 0xff && cmkp_tmp[ 0] == 0xff && cmkp_tmp[ cmk_linesize] == 0xff) sum++; cmkp_tmp += cmk_linesize; } if (sum) C_ARRAY_ADD(sum); } cmkp += cmk_linesize * yhalf; } VERTICAL_HALF(heighta, height - 1); for (x = 0; x < arraysize; x++) if (c_array[x] > max_v) max_v = c_array[x]; } return max_v; } // the secret is that tbuffer is an interlaced, offset subset of all the lines static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize, const uint8_t *nxtp, int nxt_linesize, uint8_t *tbuffer, int tbuf_linesize, int width, int height) { int y, x; prvp -= prv_linesize; nxtp -= nxt_linesize; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) tbuffer[x] = FFABS(prvp[x] - nxtp[x]); prvp += prv_linesize; nxtp += nxt_linesize; tbuffer += tbuf_linesize; } } /** * Build a map over which pixels differ a lot/a little */ static void 
build_diff_map(FieldMatchContext *fm, const uint8_t *prvp, int prv_linesize, const uint8_t *nxtp, int nxt_linesize, uint8_t *dstp, int dst_linesize, int height, int width, int plane) { int x, y, u, diff, count; int tpitch = plane ? fm->tpitchuv : fm->tpitchy; const uint8_t *dp = fm->tbuffer + tpitch; build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize, fm->tbuffer, tpitch, width, height>>1); for (y = 2; y < height - 2; y += 2) { for (x = 1; x < width - 1; x++) { diff = dp[x]; if (diff > 3) { for (count = 0, u = x-1; u < x+2 && count < 2; u++) { count += dp[u-tpitch] > 3; count += dp[u ] > 3; count += dp[u+tpitch] > 3; } if (count > 1) { dstp[x] = 1; if (diff > 19) { int upper = 0, lower = 0; for (count = 0, u = x-1; u < x+2 && count < 6; u++) { if (dp[u-tpitch] > 19) { count++; upper = 1; } if (dp[u ] > 19) count++; if (dp[u+tpitch] > 19) { count++; lower = 1; } } if (count > 3) { if (upper && lower) { dstp[x] |= 1<<1; } else { int upper2 = 0, lower2 = 0; for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) { if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1; if ( dp[u- tpitch] > 19) upper = 1; if ( dp[u+ tpitch] > 19) lower = 1; if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1; } if ((upper && (lower || upper2)) || (lower && (upper || lower2))) dstp[x] |= 1<<1; else if (count > 5) dstp[x] |= 1<<2; } } } } } } dp += tpitch; dstp += dst_linesize; } } enum { mP, mC, mN, mB, mU }; static int get_field_base(int match, int field) { return match < 3 ? 2 - field : 1 + field; } static AVFrame *select_frame(FieldMatchContext *fm, int match) { if (match == mP || match == mB) return fm->prv; else if (match == mN || match == mU) return fm->nxt; else /* match == mC */ return fm->src; } static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field) { int plane, ret; uint64_t accumPc = 0, accumPm = 0, accumPml = 0; uint64_t accumNc = 0, accumNm = 0, accumNml = 0; int norm1, norm2, mtn1, mtn2; float c1, c2, mr; const AVFrame *src = fm->src; for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) { int x, y, temp1, temp2, fbase; const AVFrame *prev, *next; uint8_t *mapp = fm->map_data[plane]; int map_linesize = fm->map_linesize[plane]; const uint8_t *srcp = src->data[plane]; const int src_linesize = src->linesize[plane]; const int srcf_linesize = src_linesize << 1; int prv_linesize, nxt_linesize; int prvf_linesize, nxtf_linesize; const int width = get_width (fm, src, plane); const int height = get_height(fm, src, plane); const int y0a = fm->y0 >> (plane != 0); const int y1a = fm->y1 >> (plane != 0); const int startx = (plane == 0 ? 
8 : 4); const int stopx = width - startx; const uint8_t *srcpf, *srcf, *srcnf; const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf; fill_buf(mapp, width, height, map_linesize, 0); /* match1 */ fbase = get_field_base(match1, field); srcf = srcp + (fbase + 1) * src_linesize; srcpf = srcf - srcf_linesize; srcnf = srcf + srcf_linesize; mapp = mapp + fbase * map_linesize; prev = select_frame(fm, match1); prv_linesize = prev->linesize[plane]; prvf_linesize = prv_linesize << 1; prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field prvnf = prvpf + prvf_linesize; // previous frame, next field /* match2 */ fbase = get_field_base(match2, field); next = select_frame(fm, match2); nxt_linesize = next->linesize[plane]; nxtf_linesize = nxt_linesize << 1; nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field nxtnf = nxtpf + nxtf_linesize; // next frame, next field map_linesize <<= 1; if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1)) build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize, mapp, map_linesize, height, width, plane); else build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize, mapp + map_linesize, map_linesize, height, width, plane); for (y = 2; y < height - 2; y += 2) { if (y0a == y1a || y < y0a || y > y1a) { for (x = startx; x < stopx; x++) { if (mapp[x] > 0 || mapp[x + map_linesize] > 0) { temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1] temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1); if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1))) accumPc += temp2; if (temp2 > 42) { if ((mapp[x]&2) || (mapp[x + map_linesize]&2)) accumPm += temp2; if ((mapp[x]&4) || (mapp[x + map_linesize]&4)) accumPml += temp2; } temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1); if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1))) accumNc += temp2; if (temp2 > 42) { if ((mapp[x]&2) || (mapp[x + map_linesize]&2)) accumNm += temp2; if ((mapp[x]&4) || (mapp[x + map_linesize]&4)) accumNml += temp2; } } } } prvpf += prvf_linesize; prvnf += prvf_linesize; srcpf += srcf_linesize; srcf += srcf_linesize; srcnf += srcf_linesize; nxtpf += nxtf_linesize; nxtnf += nxtf_linesize; mapp += map_linesize; } } if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) && FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) { accumPm = accumPml; accumNm = accumNml; } norm1 = (int)((accumPc / 6.0f) + 0.5f); norm2 = (int)((accumNc / 6.0f) + 0.5f); mtn1 = (int)((accumPm / 6.0f) + 0.5f); mtn2 = (int)((accumNm / 6.0f) + 0.5f); c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1)); c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1)); mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1)); if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) || ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) || ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) || ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1)) ret = mtn1 > mtn2 ? match2 : match1; else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ret = mtn1 > mtn2 ? match2 : match1; else ret = norm1 > norm2 ? 
match2 : match1; return ret; } static void copy_fields(const FieldMatchContext *fm, AVFrame *dst, const AVFrame *src, int field) { int plane; for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1, src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1, get_width(fm, src, plane), get_height(fm, src, plane) / 2); } static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field, const AVFrame *prv, AVFrame *src, const AVFrame *nxt) { AVFrame *dst; FieldMatchContext *fm = ctx->priv; if (match == mC) { dst = av_frame_clone(src); } else { AVFilterLink *outlink = ctx->outputs[0]; dst = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!dst) return NULL; av_frame_copy_props(dst, src); switch (match) { case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break; case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break; case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break; case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break; default: av_assert0(0); } } return dst; } static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2, AVFrame **gen_frames, int field) { const FieldMatchContext *fm = ctx->priv; #define LOAD_COMB(mid) do { \ if (combs[mid] < 0) { \ if (!gen_frames[mid]) \ gen_frames[mid] = create_weave_frame(ctx, mid, field, \ fm->prv, fm->src, fm->nxt); \ combs[mid] = calc_combed_score(fm, gen_frames[mid]); \ } \ } while (0) LOAD_COMB(m1); LOAD_COMB(m2); if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) && abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel) return m2; else return m1; } static const int fxo0m[] = { mP, mC, mN, mB, mU }; static const int fxo1m[] = { mN, mC, mP, mU, mB }; static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; FieldMatchContext *fm = ctx->priv; int combs[] = { -1, -1, -1, -1, -1 }; int order, field, i, match, sc = 0; const int *fxo; AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL }; AVFrame *dst; /* update frames queue(s) */ #define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \ if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to lose src) */ \ av_frame_free(&prv); \ prv = src; \ src = nxt; \ if (in) \ nxt = in; \ if (!prv) \ prv = src; \ if (!prv) /* received only one frame at that point */ \ return 0; \ av_assert0(prv && src && nxt); \ } while (0) if (FF_INLINK_IDX(inlink) == INPUT_MAIN) { SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt); fm->got_frame[INPUT_MAIN] = 1; } else { SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2); fm->got_frame[INPUT_CLEANSRC] = 1; } if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC])) return 0; fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0; in = fm->src; /* parity */ order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1); field = fm->field != FM_PARITY_AUTO ? fm->field : order; av_assert0(order == 0 || order == 1 || field == 0 || field == 1); fxo = field ^ order ? fxo1m : fxo0m; /* debug mode: we generate all the field combinations and their associated * combed score. XXX: inject as frame metadata?
*/ if (fm->combdbg) { for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) { if (i > mN && fm->combdbg == COMBDBG_PCN) break; gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt); if (!gen_frames[i]) return AVERROR(ENOMEM); combs[i] = calc_combed_score(fm, gen_frames[i]); } av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n", combs[0], combs[1], combs[2], combs[3], combs[4]); } else { gen_frames[mC] = av_frame_clone(fm->src); if (!gen_frames[mC]) return AVERROR(ENOMEM); } /* p/c selection and optional 3-way p/c/n matches */ match = compare_fields(fm, fxo[mC], fxo[mP], field); if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB) match = compare_fields(fm, match, fxo[mN], field); /* scene change check */ if (fm->combmatch == COMBMATCH_SC) { if (fm->lastn == outlink->frame_count - 1) { if (fm->lastscdiff > fm->scthresh) sc = 1; } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) { sc = 1; } if (!sc) { fm->lastn = outlink->frame_count; fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt); sc = fm->lastscdiff > fm->scthresh; } } if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) { switch (fm->mode) { /* 2-way p/c matches */ case MODE_PC: match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field); break; case MODE_PC_N: match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field); break; case MODE_PC_U: match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field); break; case MODE_PC_N_UB: match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field); match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field); match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field); break; /* 3-way p/c/n matches */ case MODE_PCN: match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field); break; case MODE_PCN_UB: match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field); match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field); break; default: av_assert0(0); } } /* get output frame and drop the others */ if (fm->ppsrc) { /* field matching was based on a filtered/post-processed input, we now * pick the untouched fields from the clean source */ dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2); } else { if (!gen_frames[match]) { // XXX: is that possible? dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt); } else { dst = gen_frames[match]; gen_frames[match] = NULL; } } if (!dst) return AVERROR(ENOMEM); for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++) av_frame_free(&gen_frames[i]); /* mark the frame we are unable to match properly as interlaced so a proper * de-interlacer can take over */ dst->interlaced_frame = combs[match] >= fm->combpel; if (dst->interlaced_frame) { av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n", outlink->frame_count, av_ts2timestr(in->pts, &inlink->time_base)); dst->top_field_first = field; } av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)" " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4], fm->combpel, match, dst->interlaced_frame ?
"YES" : "NO"); return ff_filter_frame(outlink, dst); } static int request_inlink(AVFilterContext *ctx, int lid) { int ret = 0; FieldMatchContext *fm = ctx->priv; if (!fm->got_frame[lid]) { AVFilterLink *inlink = ctx->inputs[lid]; ret = ff_request_frame(inlink); if (ret == AVERROR_EOF) { // flushing fm->eof |= 1 << lid; ret = filter_frame(inlink, NULL); } } return ret; } static int request_frame(AVFilterLink *outlink) { int ret; AVFilterContext *ctx = outlink->src; FieldMatchContext *fm = ctx->priv; const uint32_t eof_mask = 1<<INPUT_MAIN | fm->ppsrc<<INPUT_CLEANSRC; if ((fm->eof & eof_mask) == eof_mask) // flush done? return AVERROR_EOF; if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0) return ret; if (fm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0) return ret; return 0; } static int query_formats(AVFilterContext *ctx) { // TODO: second input source can support >8bit depth static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_NONE }; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; } static int config_input(AVFilterLink *inlink) { int ret; AVFilterContext *ctx = inlink->dst; FieldMatchContext *fm = ctx->priv; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); const int w = inlink->w; const int h = inlink->h; fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0); if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 || (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0) return ret; fm->hsub = pix_desc->log2_chroma_w; fm->vsub = pix_desc->log2_chroma_h; fm->tpitchy = FFALIGN(w, 16); fm->tpitchuv = FFALIGN(w >> 1, 16); fm->tbuffer = av_malloc(h/2 * fm->tpitchy); fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) * (((h + fm->blocky/2)/fm->blocky)+1) * 4 * sizeof(*fm->c_array)); if (!fm->tbuffer || !fm->c_array) return AVERROR(ENOMEM); return 0; } static av_cold int fieldmatch_init(AVFilterContext *ctx) { const FieldMatchContext *fm = ctx->priv; AVFilterPad pad = { .name = av_strdup("main"), .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_input, }; if (!pad.name) return AVERROR(ENOMEM); ff_insert_inpad(ctx, INPUT_MAIN, &pad); if (fm->ppsrc) { pad.name = av_strdup("clean_src"); pad.config_props = NULL; if (!pad.name) return AVERROR(ENOMEM); ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad); } if ((fm->blockx & (fm->blockx - 1)) || (fm->blocky & (fm->blocky - 1))) { av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n"); return AVERROR(EINVAL); } if (fm->combpel > fm->blockx * fm->blocky) { av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n"); return AVERROR(EINVAL); } return 0; } static av_cold void fieldmatch_uninit(AVFilterContext *ctx) { int i; FieldMatchContext *fm = ctx->priv; if (fm->prv != fm->src) av_frame_free(&fm->prv); if (fm->nxt != fm->src) av_frame_free(&fm->nxt); av_frame_free(&fm->src); av_freep(&fm->map_data[0]); av_freep(&fm->cmask_data[0]); av_freep(&fm->tbuffer); av_freep(&fm->c_array); for (i = 0; i < ctx->nb_inputs; i++) av_freep(&ctx->input_pads[i].name); } static int config_output(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; const FieldMatchContext *fm = ctx->priv; const AVFilterLink *inlink = ctx->inputs[fm->ppsrc ? 
INPUT_CLEANSRC : INPUT_MAIN]; outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP; outlink->time_base = inlink->time_base; outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; outlink->frame_rate = inlink->frame_rate; outlink->w = inlink->w; outlink->h = inlink->h; return 0; } static const AVFilterPad fieldmatch_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .request_frame = request_frame, .config_props = config_output, }, { NULL } }; AVFilter avfilter_vf_fieldmatch = { .name = "fieldmatch", .description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."), .query_formats = query_formats, .priv_size = sizeof(FieldMatchContext), .init = fieldmatch_init, .uninit = fieldmatch_uninit, .inputs = NULL, .outputs = fieldmatch_outputs, .priv_class = &fieldmatch_class, .flags = AVFILTER_FLAG_DYNAMIC_INPUTS, };
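/*
 * Illustrative sketch (not part of FFmpeg): every match type above is
 * realized by weaving one field of the current frame with one field of a
 * neighbouring frame (see copy_fields()/create_weave_frame()). The
 * standalone helper below shows the core interleaving step on a plain byte
 * plane -- copy every second line, starting at the selected parity. The
 * function name and the standalone setting are assumptions made for this
 * example only.
 */
#include <string.h>

static void weave_field_plane(unsigned char *dst, int dst_linesize,
                              const unsigned char *src, int src_linesize,
                              int width, int height, int field)
{
    int y;
    /* field==0 overwrites the even (top) lines, field==1 the odd (bottom)
     * ones; the other parity is left untouched, exactly like
     * av_image_copy_plane() called with doubled linesizes in copy_fields() */
    for (y = field; y < height; y += 2)
        memcpy(dst + (size_t)y * dst_linesize,
               src + (size_t)y * src_linesize, width);
}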
./CrossVul/dataset_final_sorted/CWE-119/c/good_5733_2
crossvul-cpp_data_good_340_3
/* * card-tcos.c: Support for TCOS cards * * Copyright (C) 2011 Peter Koch <pk@opensc-project.org> * Copyright (C) 2002 g10 Code GmbH * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include <ctype.h> #include <time.h> #include <stdlib.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static struct sc_atr_table tcos_atrs[] = { /* Infineon SLE44 */ { "3B:BA:13:00:81:31:86:5D:00:64:05:0A:02:01:31:80:90:00:8B", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66S */ { "3B:BA:14:00:81:31:86:5D:00:64:05:14:02:02:31:80:90:00:91", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX320P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:60:02:03:31:80:90:00:66", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX322P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:7B:02:03:31:80:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Philips P5CT072 */ { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:03:01:31:C0:73:F7:01:D0:00:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:04:0F:31:C0:73:F7:01:D0:00:90:00:74", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, /* Philips P5CT080 */ { "3B:BF:B6:00:81:31:FE:5D:00:64:04:28:03:02:31:C0:73:F7:01:D0:00:90:00:67", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; static struct sc_card_operations tcos_ops; static struct sc_card_driver tcos_drv = { "TCOS 3.0", "tcos", &tcos_ops, NULL, 0, NULL }; static const struct sc_card_operations *iso_ops = NULL; typedef struct tcos_data_st { unsigned int pad_flags; unsigned int next_sign; } tcos_data; static int tcos_finish(sc_card_t *card) { free(card->drv_data); return 0; } static int tcos_match_card(sc_card_t *card) { int i; i = _sc_match_atr(card, tcos_atrs, &card->type); if (i < 0) return 0; return 1; } static int tcos_init(sc_card_t *card) { unsigned long flags; tcos_data *data = malloc(sizeof(tcos_data)); if (!data) return SC_ERROR_OUT_OF_MEMORY; card->name = "TCOS"; card->drv_data = (void *)data; card->cla = 0x00; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_PAD_PKCS1; flags |= SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); if (card->type == SC_CARD_TYPE_TCOS_V3) { card->caps |= SC_CARD_CAP_APDU_EXT; _sc_card_add_rsa_alg(card, 1280, flags, 0); _sc_card_add_rsa_alg(card, 1536, flags, 0); _sc_card_add_rsa_alg(card, 1792, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return 0; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. 
*/ static int tcos_construct_fci(const sc_file_t *file, u8 *out, size_t *outlen) { u8 *p = out; u8 buf[64]; size_t n; /* FIXME: possible buffer overflow */ *p++ = 0x6F; /* FCI */ p++; /* File size */ buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x81, buf, 2, p, 16, &p); /* File descriptor */ n = 0; buf[n] = file->shareable ? 0x40 : 0; switch (file->type) { case SC_FILE_TYPE_WORKING_EF: break; case SC_FILE_TYPE_DF: buf[0] |= 0x38; break; default: return SC_ERROR_NOT_SUPPORTED; } buf[n++] |= file->ef_structure & 7; if ( (file->ef_structure & 7) > 1) { /* record structured file */ buf[n++] = 0x41; /* indicate 3rd byte */ buf[n++] = file->record_length; } sc_asn1_put_tag(0x82, buf, n, p, 8, &p); /* File identifier */ buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, 16, &p); /* Directory name */ if (file->type == SC_FILE_TYPE_DF) { if (file->namelen) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, 16, &p); } else { /* TCOS needs one, so we use a faked one */ snprintf ((char *) buf, sizeof(buf)-1, "foo-%lu", (unsigned long) time (NULL)); sc_asn1_put_tag(0x84, buf, strlen ((char *) buf), p, 16, &p); } } /* File descriptor extension */ if (file->prop_attr_len && file->prop_attr) { n = file->prop_attr_len; memcpy(buf, file->prop_attr, n); } else { n = 0; buf[n++] = 0x01; /* not invalidated, permanent */ if (file->type == SC_FILE_TYPE_WORKING_EF) buf[n++] = 0x00; /* generic data file */ } sc_asn1_put_tag(0x85, buf, n, p, 16, &p); /* Security attributes */ if (file->sec_attr_len && file->sec_attr) { memcpy(buf, file->sec_attr, file->sec_attr_len); n = file->sec_attr_len; } else { /* no attributes given - fall back to default one */ memcpy (buf+ 0, "\xa4\x00\x00\x00\xff\xff", 6); /* select */ memcpy (buf+ 6, "\xb0\x00\x00\x00\xff\xff", 6); /* read bin */ memcpy (buf+12, "\xd6\x00\x00\x00\xff\xff", 6); /* upd bin */ memcpy (buf+18, "\x60\x00\x00\x00\xff\xff", 6); /* admin grp*/ n = 24; } sc_asn1_put_tag(0x86, buf, n, p, sizeof (buf), &p); /* fixup length of FCI */ out[1] = p - out - 2; *outlen = p - out; return 0; } static int tcos_create_file(sc_card_t *card, sc_file_t *file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_apdu_t apdu; len = SC_MAX_APDU_BUFFER_SIZE; r = tcos_construct_fci(file, sbuf, &len); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "tcos_construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.cla |= 0x80; /* this is a proprietary extension */ apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static unsigned int map_operations (int commandbyte ) { unsigned int op = (unsigned int)-1; switch ( (commandbyte & 0xfe) ) { case 0xe2: /* append record */ op = SC_AC_OP_UPDATE; break; case 0x24: /* change password */ op = SC_AC_OP_UPDATE; break; case 0xe0: /* create */ op = SC_AC_OP_CREATE; break; case 0xe4: /* delete */ op = SC_AC_OP_DELETE; break; case 0xe8: /* exclude sfi */ op = SC_AC_OP_WRITE; break; case 0x82: /* external auth */ op = SC_AC_OP_READ; break; case 0xe6: /* include sfi */ op = SC_AC_OP_WRITE; break; case 0x88: /* internal auth */ op = SC_AC_OP_READ; break; case 0x04: /* invalidate */ op = SC_AC_OP_INVALIDATE; break; case 0x2a: /* perform sec.
op */ op = SC_AC_OP_SELECT; break; case 0xb0: /* read binary */ op = SC_AC_OP_READ; break; case 0xb2: /* read record */ op = SC_AC_OP_READ; break; case 0x44: /* rehabilitate */ op = SC_AC_OP_REHABILITATE; break; case 0xa4: /* select */ op = SC_AC_OP_SELECT; break; case 0xee: /* set permanent */ op = SC_AC_OP_CREATE; break; case 0x2c: /* unblock password */op = SC_AC_OP_WRITE; break; case 0xd6: /* update binary */ op = SC_AC_OP_WRITE; break; case 0xdc: /* update record */ op = SC_AC_OP_WRITE; break; case 0x20: /* verify password */ op = SC_AC_OP_SELECT; break; case 0x60: /* admin group */ op = SC_AC_OP_CREATE; break; } return op; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. */ static void parse_sec_attr(sc_card_t *card, sc_file_t *file, const u8 *buf, size_t len) { unsigned int op; /* list directory is not covered by ACLs - so always add an entry */ sc_file_add_acl_entry (file, SC_AC_OP_LIST_FILES, SC_AC_NONE, SC_AC_KEY_REF_NONE); /* FIXME: check for what LOCK is used */ sc_file_add_acl_entry (file, SC_AC_OP_LOCK, SC_AC_NONE, SC_AC_KEY_REF_NONE); for (; len >= 6; len -= 6, buf += 6) { /* FIXME: temporary hacks */ if (!memcmp(buf, "\xa4\x00\x00\x00\xff\xff", 6)) /* select */ sc_file_add_acl_entry (file, SC_AC_OP_SELECT, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\xb0\x00\x00\x00\xff\xff", 6)) /*read*/ sc_file_add_acl_entry (file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\xd6\x00\x00\x00\xff\xff", 6)) /*upd*/ sc_file_add_acl_entry (file, SC_AC_OP_UPDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); else if (!memcmp(buf, "\x60\x00\x00\x00\xff\xff", 6)) {/*adm */ sc_file_add_acl_entry (file, SC_AC_OP_WRITE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_CREATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_INVALIDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_REHABILITATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else { /* the first byte tells us the command or the command group.
We have to mask bit 0 because this one distinguishes between AND/OR combinations of PINs*/ op = map_operations (buf[0]); if (op == (unsigned int)-1) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unknown security command byte %02x\n", buf[0]); continue; } if (!buf[1]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_CHV, buf[1]); if (!buf[2] && !buf[3]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_TERM, (buf[2]<<8)|buf[3]); } } } static int tcos_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out) { sc_context_t *ctx; sc_apdu_t apdu; sc_file_t *file=NULL; u8 buf[SC_MAX_APDU_BUFFER_SIZE], pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; unsigned int i; int r, pathlen; assert(card != NULL && in_path != NULL); ctx=card->ctx; memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0, 0x04); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; /* fall through */ case SC_PATH_TYPE_FROM_CURRENT: apdu.p1 = 9; break; case SC_PATH_TYPE_DF_NAME: apdu.p1 = 4; break; case SC_PATH_TYPE_PATH: apdu.p1 = 8; if (pathlen >= 2 && memcmp(path, "\x3F\x00", 2) == 0) path += 2, pathlen -= 2; if (pathlen == 0) apdu.p1 = 0; break; case SC_PATH_TYPE_PARENT: apdu.p1 = 3; pathlen = 0; break; default: SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if( pathlen == 0 ) apdu.cse = SC_APDU_CASE_2_SHORT; apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 256; } else { apdu.resplen = 0; apdu.le = 0; apdu.p2 = 0x0C; apdu.cse = (pathlen == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r || file_out == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, r); if (apdu.resplen < 1 || apdu.resp[0] != 0x62){ sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "received invalid template %02X\n", apdu.resp[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } file = sc_file_new(); if (file == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); *file_out = file; file->path = *in_path; for(i=2; i+1<apdu.resplen && i+1+apdu.resp[i+1]<apdu.resplen; i+=2+apdu.resp[i+1]){ size_t j, len=apdu.resp[i+1]; unsigned char type=apdu.resp[i], *d=apdu.resp+i+2; switch (type) { case 0x80: case 0x81: file->size=0; for(j=0; j<len; ++j) file->size = (file->size<<8) | d[j]; break; case 0x82: file->shareable = (d[0] & 0x40) ?
1 : 0; file->ef_structure = d[0] & 7; switch ((d[0]>>3) & 7) { case 0: file->type = SC_FILE_TYPE_WORKING_EF; break; case 7: file->type = SC_FILE_TYPE_DF; break; default: sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "invalid file type %02X in file descriptor\n", d[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } break; case 0x83: file->id = (d[0]<<8) | d[1]; break; case 0x84: file->namelen = MIN(sizeof file->name, len); memcpy(file->name, d, file->namelen); break; case 0x86: sc_file_set_sec_attr(file, d, len); break; default: if (len>0) sc_file_set_prop_attr(file, d, len); } } file->magic = SC_FILE_MAGIC; parse_sec_attr(card, file, file->sec_attr, file->sec_attr_len); return 0; } static int tcos_list_files(sc_card_t *card, u8 *buf, size_t buflen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE], p1; int r, count = 0; assert(card != NULL); ctx = card->ctx; for (p1=1; p1<=2; p1++) { sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xAA, p1, 0); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (apdu.sw1==0x6A && (apdu.sw2==0x82 || apdu.sw2==0x88)) continue; r = sc_check_sw(card, apdu.sw1, apdu.sw2); SC_TEST_RET(ctx, SC_LOG_DEBUG_NORMAL, r, "List Dir failed"); if (apdu.resplen > buflen) return SC_ERROR_BUFFER_TOO_SMALL; sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "got %"SC_FORMAT_LEN_SIZE_T"u %s-FileIDs\n", apdu.resplen / 2, p1 == 1 ? "DF" : "EF"); memcpy(buf, apdu.resp, apdu.resplen); buf += apdu.resplen; buflen -= apdu.resplen; count += apdu.resplen; } return count; } static int tcos_delete_file(sc_card_t *card, const sc_path_t *path) { int r; u8 sbuf[2]; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (path->type != SC_PATH_TYPE_FILE_ID && path->len != 2) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "File type has to be SC_PATH_TYPE_FILE_ID\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } sbuf[0] = path->value[0]; sbuf[1] = path->value[1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { sc_context_t *ctx; sc_apdu_t apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE], *p; int r, default_key, tcos3; tcos_data *data; assert(card != NULL && env != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; if (se_num || (env->operation!=SC_SEC_OPERATION_DECIPHER && env->operation!=SC_SEC_OPERATION_SIGN)){ SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } if(!(env->flags & SC_SEC_ENV_KEY_REF_PRESENT)) sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "No Key-Reference in SecEnvironment\n"); else sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "Key-Reference %02X (len=%"SC_FORMAT_LEN_SIZE_T"u)\n", env->key_ref[0], env->key_ref_len); /* Key-Reference 0x80 ?? */ default_key= !(env->flags & SC_SEC_ENV_KEY_REF_PRESENT) || (env->key_ref_len==1 && env->key_ref[0]==0x80); sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "TCOS3:%d PKCS1:%d\n", tcos3, !!(env->algorithm_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); data->pad_flags = env->algorithm_flags; data->next_sign = default_key; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, tcos3 ? 
0x41 : 0xC1, 0xB8); p = sbuf; *p++=0x80; *p++=0x01; *p++=tcos3 ? 0x0A : 0x10; if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { *p++ = (env->flags & SC_SEC_ENV_KEY_REF_SYMMETRIC) ? 0x83 : 0x84; *p++ = env->key_ref_len; memcpy(p, env->key_ref, env->key_ref_len); p += env->key_ref_len; } apdu.data = sbuf; apdu.lc = apdu.datalen = (p - sbuf); r=sc_transmit_apdu(card, &apdu); if (r) { sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "%s: APDU transmit failed", sc_strerror(r)); return r; } if (apdu.sw1==0x6A && (apdu.sw2==0x81 || apdu.sw2==0x88)) { sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "Detected Signature-Only key\n"); if (env->operation==SC_SEC_OPERATION_SIGN && default_key) return SC_SUCCESS; } SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_restore_security_env(sc_card_t *card, int se_num) { return 0; } static int tcos_compute_signature(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { size_t i, dlen=datalen; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; int tcos3, r; assert(card != NULL && data != NULL && out != NULL); tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); if (datalen > 255) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); if(((tcos_data *)card->drv_data)->next_sign){ if(datalen>48){ sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Data to be signed is too long (TCOS supports max. 48 bytes)\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A, 0x9E, 0x9A); memcpy(sbuf, data, datalen); dlen=datalen; } else { int keylen= tcos3 ? 256 : 128; sc_format_apdu(card, &apdu, keylen>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = tcos3 ? 256 : 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (tcos3 && apdu.p1==0x80 && apdu.sw1==0x6A && apdu.sw2==0x87) { int keylen=128; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); } if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len = apdu.resplen>outlen ? 
outlen : apdu.resplen; memcpy(out, apdu.resp, len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_decipher(sc_card_t *card, const u8 * crgram, size_t crgram_len, u8 * out, size_t outlen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; tcos_data *data; int tcos3, r; assert(card != NULL && crgram != NULL && out != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; SC_FUNC_CALLED(ctx, SC_LOG_DEBUG_NORMAL); sc_debug(ctx, SC_LOG_DEBUG_NORMAL, "TCOS3:%d PKCS1:%d\n",tcos3, !!(data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); sc_format_apdu(card, &apdu, crgram_len>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = crgram_len; apdu.data = sbuf; apdu.lc = apdu.datalen = crgram_len+1; sbuf[0] = tcos3 ? 0x00 : ((data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) ? 0x81 : 0x02); memcpy(sbuf+1, crgram, crgram_len); r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len= (apdu.resplen>outlen) ? outlen : apdu.resplen; unsigned int offset=0; if(tcos3 && (data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) && apdu.resp[0]==0 && apdu.resp[1]==2){ offset=2; while(offset<len && apdu.resp[offset]!=0) ++offset; offset=(offset<len-1) ? offset+1 : 0; } memcpy(out, apdu.resp+offset, len-offset); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len-offset); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } /* Issue the SET PERMANENT command. With ENABLE_NULLPIN set the NullPIN method will be activated, otherwise the permanent operation will be done on the active file. 
*/ static int tcos_setperm(sc_card_t *card, int enable_nullpin) { int r; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xEE, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 0; apdu.datalen = 0; apdu.data = NULL; r = sc_transmit_apdu(card, &apdu); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_get_serialnr(sc_card_t *card, sc_serial_number_t *serial) { int r; if (!serial) return SC_ERROR_INVALID_ARGUMENTS; /* see if we have cached serial number */ if (card->serialnr.len) { memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } card->serialnr.len = sizeof card->serialnr.value; r = sc_parse_ef_gdo(card, card->serialnr.value, &card->serialnr.len, NULL, 0); if (r < 0) { card->serialnr.len = 0; return r; } /* copy and return serial number */ memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } static int tcos_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr) { switch (cmd) { case SC_CARDCTL_TCOS_SETPERM: return tcos_setperm(card, !!ptr); case SC_CARDCTL_GET_SERIALNR: return tcos_get_serialnr(card, (sc_serial_number_t *)ptr); } return SC_ERROR_NOT_SUPPORTED; } struct sc_card_driver * sc_get_tcos_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; tcos_ops = *iso_drv->ops; tcos_ops.match_card = tcos_match_card; tcos_ops.init = tcos_init; tcos_ops.finish = tcos_finish; tcos_ops.create_file = tcos_create_file; tcos_ops.set_security_env = tcos_set_security_env; tcos_ops.select_file = tcos_select_file; tcos_ops.list_files = tcos_list_files; tcos_ops.delete_file = tcos_delete_file; tcos_ops.compute_signature = tcos_compute_signature; tcos_ops.decipher = tcos_decipher; tcos_ops.restore_security_env = tcos_restore_security_env; tcos_ops.card_ctl = tcos_card_ctl; return &tcos_drv; }
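/*
 * Illustrative sketch (not part of OpenSC): on the raw-RSA signature path,
 * tcos_compute_signature() above fills sbuf with a one-byte padding
 * indicator (0x02) followed by a PKCS#1 v1.5 block-type-01 frame of the key
 * length: 0x00 0x01 0xFF..0xFF 0x00 <digest>. The hypothetical helper below
 * rebuilds that buffer layout in isolation; its name, parameters and return
 * convention are assumptions made for this example only.
 */
#include <stddef.h>
#include <string.h>

static int build_tcos_sign_input(unsigned char *buf, size_t buflen,
                                 size_t keylen, /* modulus size: 128 or 256 */
                                 const unsigned char *digest, size_t dlen)
{
    if (buflen < keylen + 1 || dlen == 0 || dlen + 11 > keylen)
        return -1;                       /* need room for >= 8 bytes of 0xFF */
    buf[0] = 0x02;                       /* TCOS padding indicator byte */
    buf[1] = 0x00;                       /* PKCS#1: leading zero octet */
    buf[2] = 0x01;                       /* PKCS#1: block type 01 (signature) */
    memset(buf + 3, 0xff, keylen - dlen - 3);  /* 0xFF filler */
    buf[keylen - dlen] = 0x00;           /* separator in front of the digest */
    memcpy(buf + keylen - dlen + 1, digest, dlen);
    return (int)(keylen + 1);            /* total bytes handed to the card */
}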
./CrossVul/dataset_final_sorted/CWE-119/c/good_340_3
crossvul-cpp_data_good_5476_1
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ /* * TIFF Library. * * Predictor Tag Support (used by multiple codecs). */ #include "tiffiop.h" #include "tif_predict.h" #define PredictorState(tif) ((TIFFPredictorState*) (tif)->tif_data) static int horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc); static int horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static int horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static int fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc); static int fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc); static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s); static int PredictorSetup(TIFF* tif) { static const char module[] = "PredictorSetup"; TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; switch (sp->predictor) /* no differencing */ { case PREDICTOR_NONE: return 1; case PREDICTOR_HORIZONTAL: if (td->td_bitspersample != 8 && td->td_bitspersample != 16 && td->td_bitspersample != 32) { TIFFErrorExt(tif->tif_clientdata, module, "Horizontal differencing \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; case PREDICTOR_FLOATINGPOINT: if (td->td_sampleformat != SAMPLEFORMAT_IEEEFP) { TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d data format", td->td_sampleformat); return 0; } if (td->td_bitspersample != 16 && td->td_bitspersample != 24 && td->td_bitspersample != 32 && td->td_bitspersample != 64) { /* Should 64 be allowed? 
*/ TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; default: TIFFErrorExt(tif->tif_clientdata, module, "\"Predictor\" value %d not supported", sp->predictor); return 0; } sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); /* * Calculate the scanline/tile-width size in bytes. */ if (isTiled(tif)) sp->rowsize = TIFFTileRowSize(tif); else sp->rowsize = TIFFScanlineSize(tif); if (sp->rowsize == 0) return 0; return 1; } static int PredictorSetupDecode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupdecode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->decodepfunc = horAcc8; break; case 16: sp->decodepfunc = horAcc16; break; case 32: sp->decodepfunc = horAcc32; break; } /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped before * the accumulation step. We do this with a special-purpose * routine and override the normal post decoding logic that * the library set up when the directory was read. */ if (tif->tif_flags & TIFF_SWAB) { if (sp->decodepfunc == horAcc16) { sp->decodepfunc = swabHorAcc16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->decodepfunc == horAcc32) { sp->decodepfunc = swabHorAcc32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->decodepfunc = fpAcc; /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * The data should not be swapped outside of the floating * point predictor, the accumulation routine should return * bytes in the native order. */ if (tif->tif_flags & TIFF_SWAB) { tif->tif_postdecode = _TIFFNoPostDecode; } /* * Allocate buffer to keep the decoded bytes before * rearranging in the right order */ } return 1; } static int PredictorSetupEncode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupencode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->encodepfunc = horDiff8; break; case 16: sp->encodepfunc = horDiff16; break; case 32: sp->encodepfunc = horDiff32; break; } /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped after * the differentiation step.
We do this with a special-purpose * routine and override the normal post decoding logic that * the library set up when the directory was read. */ if (tif->tif_flags & TIFF_SWAB) { if (sp->encodepfunc == horDiff16) { sp->encodepfunc = swabHorDiff16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->encodepfunc == horDiff32) { sp->encodepfunc = swabHorDiff32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->encodepfunc = fpDiff; /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } } return 1; } #define REPEAT4(n, op) \ switch (n) { \ default: { tmsize_t i; for (i = n-4; i > 0; i--) { op; } } \ case 4: op; \ case 3: op; \ case 2: op; \ case 1: op; \ case 0: ; \ } /* Remarks related to C standard compliance in all below functions : */ /* - to avoid any undefined behaviour, we only operate on unsigned types */ /* since the behaviour of "overflows" is defined (wrap over) */ /* - when storing into the byte stream, we explicitly mask with 0xff so */ /* as to make icc -check=conversions happy (not necessary by the standard) */ static int horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; unsigned char* cp = (unsigned char*) cp0; if((cc%stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc8", "%s", "(cc%stride)!=0"); return 0; } if (cc > stride) { /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; cc -= 3; cp += 3; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cc -= 3; cp += 3; } } else if (stride == 4) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; unsigned int ca = cp[3]; cc -= 4; cp += 4; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cp[3] = (unsigned char) ((ca += cp[3]) & 0xff); cc -= 4; cp += 4; } } else { cc -= stride; do { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + *cp) & 0xff); cp++) cc -= stride; } while (cc>0); } } return 1; } static int swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; TIFFSwabArrayOfShort(wp, wc); return horAcc16(tif, cp0, cc); } static int horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; if((cc%(2*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc16", "%s", "cc%(2*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] + (unsigned int)wp[0]) & 0xffff); wp++) wc -= stride; } while (wc > 0); } return 1; } static int swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; TIFFSwabArrayOfLong(wp, wc); return horAcc32(tif, cp0, cc); } static int horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; if((cc%(4*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc32", "%s",
"cc%(4*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 0); } return 1; } /* * Floating point predictor accumulation routine. */ static int fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count = cc; uint8 *cp = (uint8 *) cp0; uint8 *tmp; if(cc%(bps*stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "fpAcc", "%s", "cc%(bps*stride))!=0"); return 0; } /* allocate only after validating cc, so the error path above cannot leak */ tmp = (uint8 *)_TIFFmalloc(cc); if (!tmp) return 0; while (count > stride) { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + cp[0]) & 0xff); cp++) count -= stride; } _TIFFmemcpy(tmp, cp0, cc); cp = (uint8 *) cp0; for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[bps * count + byte] = tmp[byte * wc + count]; #else cp[bps * count + byte] = tmp[(bps - byte - 1) * wc + count]; #endif } } _TIFFfree(tmp); return 1; } /* * Decode a scanline and apply the predictor routine. */ static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decoderow != NULL); assert(sp->decodepfunc != NULL); if ((*sp->decoderow)(tif, op0, occ0, s)) { return (*sp->decodepfunc)(tif, op0, occ0); } else return 0; } /* * Decode a tile/strip and apply the predictor routine. * Note that horizontal differencing must be done on a * row-by-row basis. The width of a "row" has already * been calculated at pre-decode time according to the * strip/tile dimensions. */ static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decodetile != NULL); if ((*sp->decodetile)(tif, op0, occ0, s)) { tmsize_t rowsize = sp->rowsize; assert(rowsize > 0); if((occ0%rowsize) !=0) { TIFFErrorExt(tif->tif_clientdata, "PredictorDecodeTile", "%s", "occ0%rowsize != 0"); return 0; } assert(sp->decodepfunc != NULL); while (occ0 > 0) { if( !(*sp->decodepfunc)(tif, op0, rowsize) ) return 0; occ0 -= rowsize; op0 += rowsize; } return 1; } else return 0; } static int horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; unsigned char* cp = (unsigned char*) cp0; if((cc%stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff8", "%s", "(cc%stride)!=0"); return 0; } if (cc > stride) { cc -= stride; /* * Pipeline the most common cases.
*/ if (stride == 3) { unsigned int r1, g1, b1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; do { r1 = cp[3]; cp[3] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[4]; cp[4] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[5]; cp[5] = (unsigned char)((b1-b2)&0xff); b2 = b1; cp += 3; } while ((cc -= 3) > 0); } else if (stride == 4) { unsigned int r1, g1, b1, a1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; unsigned int a2 = cp[3]; do { r1 = cp[4]; cp[4] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[5]; cp[5] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[6]; cp[6] = (unsigned char)((b1-b2)&0xff); b2 = b1; a1 = cp[7]; cp[7] = (unsigned char)((a1-a2)&0xff); a2 = a1; cp += 4; } while ((cc -= 4) > 0); } else { cp += cc - 1; do { REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) } while ((cc -= stride) > 0); } } return 1; } static int horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint16 *wp = (uint16*) cp0; tmsize_t wc = cc/2; if((cc%(2*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff16", "%s", "(cc%(2*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--) wc -= stride; } while (wc > 0); } return 1; } static int swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; if( !horDiff16(tif, cp0, cc) ) return 0; TIFFSwabArrayOfShort(wp, wc); return 1; } static int horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint32 *wp = (uint32*) cp0; tmsize_t wc = cc/4; if((cc%(4*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff32", "%s", "(cc%(4*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] -= wp[0]; wp--) wc -= stride; } while (wc > 0); } return 1; } static int swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; if( !horDiff32(tif, cp0, cc) ) return 0; TIFFSwabArrayOfLong(wp, wc); return 1; } /* * Floating point predictor differencing routine.
*/ static int fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); if((cc%(bps*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "fpDiff", "%s", "(cc%(bps*stride))!=0"); return 0; } if (!tmp) return 0; _TIFFmemcpy(tmp, cp0, cc); for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[byte * wc + count] = tmp[bps * count + byte]; #else cp[(bps - byte - 1) * wc + count] = tmp[bps * count + byte]; #endif } } _TIFFfree(tmp); cp = (uint8 *) cp0; cp += cc - stride - 1; for (count = cc; count > stride; count -= stride) REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) return 1; } static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encoderow != NULL); /* XXX horizontal differencing alters user's data XXX */ if( !(*sp->encodepfunc)(tif, bp, cc) ) return 0; return (*sp->encoderow)(tif, bp, cc, s); } static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s) { static const char module[] = "PredictorEncodeTile"; TIFFPredictorState *sp = PredictorState(tif); uint8 *working_copy; tmsize_t cc = cc0, rowsize; unsigned char* bp; int result_code; assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encodetile != NULL); /* * Do predictor manipulation in a working buffer to avoid altering * the callers buffer. http://trac.osgeo.org/gdal/ticket/1965 */ working_copy = (uint8*) _TIFFmalloc(cc0); if( working_copy == NULL ) { TIFFErrorExt(tif->tif_clientdata, module, "Out of memory allocating " TIFF_SSIZE_FORMAT " byte temp buffer.", cc0 ); return 0; } memcpy( working_copy, bp0, cc0 ); bp = working_copy; rowsize = sp->rowsize; assert(rowsize > 0); if((cc0%rowsize)!=0) { TIFFErrorExt(tif->tif_clientdata, "PredictorEncodeTile", "%s", "(cc0%rowsize)!=0"); return 0; } while (cc > 0) { (*sp->encodepfunc)(tif, bp, rowsize); cc -= rowsize; bp += rowsize; } result_code = (*sp->encodetile)(tif, working_copy, cc0, s); _TIFFfree( working_copy ); return result_code; } #define FIELD_PREDICTOR (FIELD_CODEC+0) /* XXX */ static const TIFFField predictFields[] = { { TIFFTAG_PREDICTOR, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UINT16, FIELD_PREDICTOR, FALSE, FALSE, "Predictor", NULL }, }; static int PredictorVSetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vsetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: sp->predictor = (uint16) va_arg(ap, uint16_vap); TIFFSetFieldBit(tif, FIELD_PREDICTOR); break; default: return (*sp->vsetparent)(tif, tag, ap); } tif->tif_flags |= TIFF_DIRTYDIRECT; return 1; } static int PredictorVGetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vgetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: *va_arg(ap, uint16*) = (uint16)sp->predictor; break; default: return (*sp->vgetparent)(tif, tag, ap); } return 1; } static void PredictorPrintDir(TIFF* tif, FILE* fd, long flags) { TIFFPredictorState* sp = PredictorState(tif); (void) flags; if (TIFFFieldSet(tif,FIELD_PREDICTOR)) { fprintf(fd, " Predictor: "); switch (sp->predictor) { case 1: fprintf(fd, "none "); break; case 2: fprintf(fd, "horizontal 
differencing "); break; case 3: fprintf(fd, "floating point predictor "); break; } fprintf(fd, "%u (0x%x)\n", sp->predictor, sp->predictor); } if (sp->printdir) (*sp->printdir)(tif, fd, flags); } int TIFFPredictorInit(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); /* * Merge codec-specific tag information. */ if (!_TIFFMergeFields(tif, predictFields, TIFFArrayCount(predictFields))) { TIFFErrorExt(tif->tif_clientdata, "TIFFPredictorInit", "Merging Predictor codec-specific tags failed"); return 0; } /* * Override parent get/set field methods. */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PredictorVGetField;/* hook for predictor tag */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PredictorVSetField;/* hook for predictor tag */ sp->printdir = tif->tif_tagmethods.printdir; tif->tif_tagmethods.printdir = PredictorPrintDir; /* hook for predictor tag */ sp->setupdecode = tif->tif_setupdecode; tif->tif_setupdecode = PredictorSetupDecode; sp->setupencode = tif->tif_setupencode; tif->tif_setupencode = PredictorSetupEncode; sp->predictor = 1; /* default value */ sp->encodepfunc = NULL; /* no predictor routine */ sp->decodepfunc = NULL; /* no predictor routine */ return 1; } int TIFFPredictorCleanup(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; tif->tif_tagmethods.printdir = sp->printdir; tif->tif_setupdecode = sp->setupdecode; tif->tif_setupencode = sp->setupencode; return 1; } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
./CrossVul/dataset_final_sorted/CWE-119/c/good_5476_1
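/*
 * Editor's note on the va_arg pattern in PredictorVSetField above: C's
 * default argument promotions pass anything narrower than int as int, so
 * reading a 16-bit tag value with va_arg(ap, uint16) would be undefined on
 * most ABIs; the uint16_vap type used there exists so the promoted value is
 * read and then narrowed explicitly. The sketch below is a hypothetical,
 * self-contained illustration of the same rule -- take_u16() is not a
 * libtiff function.
 */
#include <stdarg.h>
#include <stdio.h>

static unsigned short take_u16(int count, ...)
{
        va_list ap;
        unsigned short last = 0;

        va_start(ap, count);
        for (int i = 0; i < count; i++)
                /* Read as unsigned int -- the promoted type -- then narrow,
                 * mirroring (uint16) va_arg(ap, uint16_vap) above. */
                last = (unsigned short)va_arg(ap, unsigned int);
        va_end(ap);
        return last;
}

int main(void)
{
        printf("%u\n", take_u16(2, 3, 65535));  /* prints 65535 */
        return 0;
}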
crossvul-cpp_data_bad_969_0
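/*
 * Editor's note: the record below keeps its per-interface sockets on a
 * BSD sys/queue-style intrusive list (LIST_ENTRY/LIST_FOREACH from its
 * bundled queue.h). A minimal sketch of that idiom with the system
 * <sys/queue.h>, under hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_node {
        int value;
        LIST_ENTRY(demo_node) link;  /* links embedded in the element itself */
};

LIST_HEAD(, demo_node) demo_list = LIST_HEAD_INITIALIZER(demo_list);

int main(void)
{
        struct demo_node *n;

        for (int i = 0; i < 3; i++) {
                n = calloc(1, sizeof(*n));
                if (!n)
                        return 1;
                n->value = i;
                LIST_INSERT_HEAD(&demo_list, n, link);
        }
        LIST_FOREACH(n, &demo_list, link)  /* visits 2, 1, 0 */
                printf("%d\n", n->value);
        /* LIST_FOREACH_SAFE (used in the record below) additionally allows
         * unlinking while iterating; it is a BSD extension that plain glibc
         * sys/queue.h may not provide. */
        while ((n = LIST_FIRST(&demo_list)) != NULL) {
                LIST_REMOVE(n, link);
                free(n);
        }
        return 0;
}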
/* SSDP responder * * Copyright (c) 2017 Joachim Nilsson <troglobit@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.a */ #include <config.h> #include <ctype.h> #include <err.h> #include <errno.h> #include <getopt.h> #include <ifaddrs.h> #include <netdb.h> #include <paths.h> #include <poll.h> #include <stdio.h> #include <signal.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include <arpa/inet.h> #include <netinet/ip.h> #include <netinet/udp.h> #include <sys/param.h> /* MIN() */ #include <sys/socket.h> #include "ssdp.h" #include "queue.h" struct ifsock { LIST_ENTRY(ifsock) link; int stale; int mod; /* * Sockets for inbound and outbound * * - The inbound is the multicast socket, shared between all ifaces * - The outbound is bound to the iface address and a random port */ int in, out; /* Interface address and netmask */ struct sockaddr_in addr; struct sockaddr_in mask; void (*cb)(int); }; LIST_HEAD(, ifsock) il = LIST_HEAD_INITIALIZER(); static char *supported_types[] = { SSDP_ST_ALL, "upnp:rootdevice", "urn:schemas-upnp-org:device:InternetGatewayDevice:1", uuid, NULL }; int debug = 0; int running = 1; char uuid[42]; char hostname[64]; char *os = NULL, *ver = NULL; char server_string[64] = "POSIX UPnP/1.0 " PACKAGE_NAME "/" PACKAGE_VERSION; /* Find interface in same subnet as sa */ static struct ifsock *find_outbound(struct sockaddr *sa) { in_addr_t cand; struct ifsock *ifs; struct sockaddr_in *addr = (struct sockaddr_in *)sa; cand = addr->sin_addr.s_addr; LIST_FOREACH(ifs, &il, link) { in_addr_t a, m; a = ifs->addr.sin_addr.s_addr; m = ifs->mask.sin_addr.s_addr; if (a == htonl(INADDR_ANY) || m == htonl(INADDR_ANY)) continue; if ((a & m) == (cand & m)) return ifs; } return NULL; } /* Exact match, must be same ifaddr as sa */ static struct ifsock *find_iface(struct sockaddr *sa) { struct ifsock *ifs; struct sockaddr_in *addr = (struct sockaddr_in *)sa; if (!sa) return NULL; LIST_FOREACH(ifs, &il, link) { if (ifs->addr.sin_addr.s_addr == addr->sin_addr.s_addr) return ifs; } return NULL; } int register_socket(int in, int out, struct sockaddr *addr, struct sockaddr *mask, void (*cb)(int sd)) { struct ifsock *ifs; struct sockaddr_in *address = (struct sockaddr_in *)addr; struct sockaddr_in *netmask = (struct sockaddr_in *)mask; ifs = calloc(1, sizeof(*ifs)); if (!ifs) { char *host = inet_ntoa(address->sin_addr); logit(LOG_ERR, "Failed registering host %s socket: %s", host, strerror(errno)); return -1; } ifs->in = in; ifs->out = out; ifs->mod = 1; ifs->cb = cb; ifs->addr = *address; if (mask) ifs->mask = *netmask; LIST_INSERT_HEAD(&il, ifs, link); return 0; } static int open_socket(char *ifname, struct sockaddr *addr, int port) { int sd, val, rc; char loop; struct ip_mreqn mreq; struct sockaddr_in sin, *address = (struct sockaddr_in *)addr; sd = socket(AF_INET, SOCK_DGRAM | 
SOCK_NONBLOCK | SOCK_CLOEXEC, 0); if (sd < 0) return -1; sin.sin_family = AF_INET; sin.sin_port = htons(port); sin.sin_addr = address->sin_addr; if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) < 0) { close(sd); logit(LOG_ERR, "Failed binding to %s:%d: %s", inet_ntoa(address->sin_addr), port, strerror(errno)); return -1; } #if 0 ENABLE_SOCKOPT(sd, SOL_SOCKET, SO_REUSEADDR); #ifdef SO_REUSEPORT ENABLE_SOCKOPT(sd, SOL_SOCKET, SO_REUSEPORT); #endif #endif memset(&mreq, 0, sizeof(mreq)); mreq.imr_address = address->sin_addr; mreq.imr_multiaddr.s_addr = inet_addr(MC_SSDP_GROUP); if (setsockopt(sd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq))) { close(sd); logit(LOG_ERR, "Failed joining group %s: %s", MC_SSDP_GROUP, strerror(errno)); return -1; } val = 2; /* Default 2, but should be configurable */ rc = setsockopt(sd, IPPROTO_IP, IP_MULTICAST_TTL, &val, sizeof(val)); if (rc < 0) { close(sd); logit(LOG_ERR, "Failed setting multicast TTL: %s", strerror(errno)); return -1; } loop = 0; rc = setsockopt(sd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); if (rc < 0) { close(sd); logit(LOG_ERR, "Failed disabing multicast loop: %s", strerror(errno)); return -1; } rc = setsockopt(sd, IPPROTO_IP, IP_MULTICAST_IF, &address->sin_addr, sizeof(address->sin_addr)); if (rc < 0) { close(sd); logit(LOG_ERR, "Failed setting multicast interface: %s", strerror(errno)); return -1; } logit(LOG_DEBUG, "Adding new interface %s with address %s", ifname, inet_ntoa(address->sin_addr)); return sd; } static int close_socket(void) { int ret = 0; struct ifsock *ifs, *tmp; LIST_FOREACH_SAFE(ifs, &il, link, tmp) { LIST_REMOVE(ifs, link); if (ifs->out != -1) ret |= close(ifs->out); else ret |= close(ifs->in); free(ifs); } return ret; } static int filter_addr(struct sockaddr *sa) { struct ifsock *ifs; struct sockaddr_in *sin = (struct sockaddr_in *)sa; if (!sa) return 1; if (sa->sa_family != AF_INET) return 1; if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) return 1; if (sin->sin_addr.s_addr == htonl(INADDR_LOOPBACK)) return 1; ifs = find_outbound(sa); if (ifs) { if (ifs->addr.sin_addr.s_addr != htonl(INADDR_ANY)) return 1; } return 0; } static int filter_iface(char *ifname, char *iflist[], size_t num) { size_t i; if (!num) { logit(LOG_DEBUG, "No interfaces to filter, using all with an IP address."); return 0; } logit(LOG_DEBUG, "Filter %s? Comparing %zd entries ...", ifname, num); for (i = 0; i < num; i++) { logit(LOG_DEBUG, "Filter %s? 
Comparing with %s ...", ifname, iflist[i]); if (!strcmp(ifname, iflist[i])) return 0; } return 1; } static void compose_addr(struct sockaddr_in *sin, char *group, int port) { memset(sin, 0, sizeof(*sin)); sin->sin_family = AF_INET; sin->sin_port = htons(port); sin->sin_addr.s_addr = inet_addr(group); } static void compose_response(char *type, char *host, char *buf, size_t len) { char usn[256]; char date[42]; time_t now; /* RFC1123 date, as specified in RFC2616 */ now = time(NULL); strftime(date, sizeof(date), "%a, %d %b %Y %T %Z", gmtime(&now)); if (type) { if (!strcmp(type, uuid)) type = NULL; else snprintf(usn, sizeof(usn), "%s::%s", uuid, type); } if (!type) strncpy(usn, uuid, sizeof(usn)); snprintf(buf, len, "HTTP/1.1 200 OK\r\n" "Server: %s\r\n" "Date: %s\r\n" "Location: http://%s:%d%s\r\n" "ST: %s\r\n" "EXT: \r\n" "USN: %s\r\n" "Cache-Control: max-age=%d\r\n" "\r\n", server_string, date, host, LOCATION_PORT, LOCATION_DESC, type, usn, CACHE_TIMEOUT); } static void compose_search(char *type, char *buf, size_t len) { snprintf(buf, len, "M-SEARCH * HTTP/1.1\r\n" "Host: %s:%d\r\n" "MAN: \"ssdp:discover\"\r\n" "MX: 1\r\n" "ST: %s\r\n" "User-Agent: %s\r\n" "\r\n", MC_SSDP_GROUP, MC_SSDP_PORT, type, server_string); } static void compose_notify(char *type, char *host, char *buf, size_t len) { char usn[256]; if (type) { if (!strcmp(type, SSDP_ST_ALL)) type = NULL; else snprintf(usn, sizeof(usn), "%s::%s", uuid, type); } if (!type) { type = usn; strncpy(usn, uuid, sizeof(usn)); } snprintf(buf, len, "NOTIFY * HTTP/1.1\r\n" "Host: %s:%d\r\n" "Server: %s\r\n" "Location: http://%s:%d%s\r\n" "NT: %s\r\n" "NTS: ssdp:alive\r\n" "USN: %s\r\n" "Cache-Control: max-age=%d\r\n" "\r\n", MC_SSDP_GROUP, MC_SSDP_PORT, server_string, host, LOCATION_PORT, LOCATION_DESC, type, usn, CACHE_TIMEOUT); } size_t pktlen(unsigned char *buf) { size_t hdr = sizeof(struct udphdr); return strlen((char *)buf + hdr) + hdr; } static void send_search(struct ifsock *ifs, char *type) { ssize_t num; char buf[MAX_PKT_SIZE]; struct sockaddr dest; memset(buf, 0, sizeof(buf)); compose_search(type, buf, sizeof(buf)); compose_addr((struct sockaddr_in *)&dest, MC_SSDP_GROUP, MC_SSDP_PORT); logit(LOG_DEBUG, "Sending M-SEARCH ..."); num = sendto(ifs->out, buf, strlen(buf), 0, &dest, sizeof(struct sockaddr_in)); if (num < 0) logit(LOG_WARNING, "Failed sending SSDP M-SEARCH"); } static void send_message(struct ifsock *ifs, char *type, struct sockaddr *sa) { int s; size_t i, len, note = 0; ssize_t num; char host[NI_MAXHOST]; char buf[MAX_PKT_SIZE]; struct sockaddr dest; struct sockaddr_in *sin = (struct sockaddr_in *)sa; gethostname(hostname, sizeof(hostname)); s = getnameinfo((struct sockaddr *)&ifs->addr, sizeof(struct sockaddr_in), host, sizeof(host), NULL, 0, NI_NUMERICHOST); if (s) { logit(LOG_WARNING, "Failed getnameinfo(): %s", gai_strerror(s)); return; } if (ifs->addr.sin_addr.s_addr == htonl(INADDR_ANY)) return; if (ifs->out == -1) return; if (!strcmp(type, SSDP_ST_ALL)) type = NULL; memset(buf, 0, sizeof(buf)); if (sin) compose_response(type, host, buf, sizeof(buf)); else compose_notify(type, host, buf, sizeof(buf)); if (!sin) { note = 1; compose_addr((struct sockaddr_in *)&dest, MC_SSDP_GROUP, MC_SSDP_PORT); sin = (struct sockaddr_in *)&dest; } logit(LOG_DEBUG, "Sending %s from %s ...", !note ? "reply" : "notify", host); num = sendto(ifs->out, buf, strlen(buf), 0, sin, sizeof(struct sockaddr_in)); if (num < 0) logit(LOG_WARNING, "Failed sending SSDP %s, type: %s: %s", !note ? 
"reply" : "notify", type, strerror(errno)); } static void ssdp_recv(int sd) { ssize_t len; struct sockaddr sa; socklen_t salen; char buf[MAX_PKT_SIZE]; memset(buf, 0, sizeof(buf)); len = recvfrom(sd, buf, sizeof(buf), MSG_DONTWAIT, &sa, &salen); if (len > 0) { buf[len] = 0; if (sa.sa_family != AF_INET) return; if (strstr(buf, "M-SEARCH *")) { size_t i; char *ptr, *type; struct ifsock *ifs; struct sockaddr_in *sin = (struct sockaddr_in *)&sa; ifs = find_outbound(&sa); if (!ifs) { logit(LOG_DEBUG, "No matching socket for client %s", inet_ntoa(sin->sin_addr)); return; } logit(LOG_DEBUG, "Matching socket for client %s", inet_ntoa(sin->sin_addr)); type = strcasestr(buf, "\r\nST:"); if (!type) { logit(LOG_DEBUG, "No Search Type (ST:) found in M-SEARCH *, assuming " SSDP_ST_ALL); type = SSDP_ST_ALL; send_message(ifs, type, &sa); return; } type = strchr(type, ':'); if (!type) return; type++; while (isspace(*type)) type++; ptr = strstr(type, "\r\n"); if (!ptr) return; *ptr = 0; for (i = 0; supported_types[i]; i++) { if (!strcmp(supported_types[i], type)) { logit(LOG_DEBUG, "M-SEARCH * ST: %s from %s port %d", type, inet_ntoa(sin->sin_addr), ntohs(sin->sin_port)); send_message(ifs, type, &sa); return; } } logit(LOG_DEBUG, "M-SEARCH * for unsupported ST: %s from %s", type, inet_ntoa(sin->sin_addr)); } } } static int multicast_init(void) { int sd; struct sockaddr sa; struct sockaddr_in *sin = (struct sockaddr_in *)&sa; sd = socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0); if (sd < 0) { logit(LOG_ERR, "Failed opening multicast socket: %s", strerror(errno)); return -1; } memset(&sa, 0, sizeof(sa)); sin->sin_family = AF_INET; sin->sin_addr.s_addr = inet_addr(MC_SSDP_GROUP); sin->sin_port = htons(MC_SSDP_PORT); if (bind(sd, &sa, sizeof(*sin)) < 0) { close(sd); logit(LOG_ERR, "Failed binding to %s:%d: %s", inet_ntoa(sin->sin_addr), MC_SSDP_PORT, strerror(errno)); return -1; } register_socket(sd, -1, &sa, NULL, ssdp_recv); return sd; } static int multicast_join(int sd, struct sockaddr *sa) { struct ip_mreqn mreq; struct sockaddr_in *sin = (struct sockaddr_in *)sa; memset(&mreq, 0, sizeof(mreq)); mreq.imr_address = sin->sin_addr; mreq.imr_multiaddr.s_addr = inet_addr(MC_SSDP_GROUP); if (setsockopt(sd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq))) { if (EADDRINUSE == errno) return 0; logit(LOG_ERR, "Failed joining group %s: %s", MC_SSDP_GROUP, strerror(errno)); return -1; } return 0; } static void mark(void) { struct ifsock *ifs; LIST_FOREACH(ifs, &il, link) { if (ifs->out != -1) ifs->stale = 1; else ifs->stale = 0; } } static int sweep(void) { int modified = 0; struct ifsock *ifs, *tmp; LIST_FOREACH_SAFE(ifs, &il, link, tmp) { if (!ifs->stale) continue; modified++; logit(LOG_DEBUG, "Removing stale ifs %s", inet_ntoa(ifs->addr.sin_addr)); LIST_REMOVE(ifs, link); close(ifs->out); free(ifs); } return modified; } static int ssdp_init(int in, char *iflist[], size_t num) { int modified; size_t i; struct ifaddrs *ifaddrs, *ifa; logit(LOG_INFO, "Updating interfaces ..."); if (getifaddrs(&ifaddrs) < 0) { logit(LOG_ERR, "Failed getifaddrs(): %s", strerror(errno)); return -1; } /* Mark all outbound interfaces as stale */ mark(); /* First pass, clear stale marker from exact matches */ for (ifa = ifaddrs; ifa; ifa = ifa->ifa_next) { struct ifsock *ifs; /* Do we already have it? 
*/ ifs = find_iface(ifa->ifa_addr); if (ifs) { ifs->stale = 0; continue; } } /* Clean out any stale interface addresses */ modified = sweep(); /* Second pass, add new ones */ for (ifa = ifaddrs; ifa; ifa = ifa->ifa_next) { int sd; /* Interface filtering, optional command line argument */ if (filter_iface(ifa->ifa_name, iflist, num)) { logit(LOG_DEBUG, "Skipping %s, not in iflist.", ifa->ifa_name); continue; } /* Do we have another in the same subnet? */ if (filter_addr(ifa->ifa_addr)) continue; sd = open_socket(ifa->ifa_name, ifa->ifa_addr, MC_SSDP_PORT); if (sd < 0) continue; multicast_join(in, ifa->ifa_addr); if (register_socket(in, sd, ifa->ifa_addr, ifa->ifa_netmask, ssdp_recv)) { close(sd); break; } modified++; } freeifaddrs(ifaddrs); return modified; } static void handle_message(int sd) { struct ifsock *ifs; LIST_FOREACH(ifs, &il, link) { if (ifs->in != sd) continue; if (ifs->cb) ifs->cb(sd); } } static void wait_message(time_t tmo) { int num = 1, timeout; size_t ifnum = 0; struct pollfd pfd[MAX_NUM_IFACES]; struct ifsock *ifs; LIST_FOREACH(ifs, &il, link) { if (ifs->out != -1) continue; pfd[ifnum].fd = ifs->in; pfd[ifnum].events = POLLIN | POLLHUP; ifnum++; } while (1) { size_t i; timeout = tmo - time(NULL); if (timeout < 0) break; num = poll(pfd, ifnum, timeout * 1000); if (num < 0) { if (EINTR == errno) break; err(1, "Unrecoverable error"); } if (num == 0) break; for (i = 0; num > 0 && i < ifnum; i++) { if (pfd[i].revents & POLLIN) { handle_message(pfd[i].fd); num--; } } } } static void announce(int mod) { struct ifsock *ifs; logit(LOG_INFO, "Sending SSDP NOTIFY new:%d ...", mod); LIST_FOREACH(ifs, &il, link) { size_t i; if (mod && !ifs->mod) continue; ifs->mod = 0; // send_search(ifs, "upnp:rootdevice"); for (i = 0; supported_types[i]; i++) { /* UUID sent in SSDP_ST_ALL, first announce */ if (!strcmp(supported_types[i], uuid)) continue; send_message(ifs, supported_types[i], NULL); } } } static void lsb_init(void) { FILE *fp; char *ptr; char line[80]; const char *file = "/etc/lsb-release"; fp = fopen(file, "r"); if (!fp) { fallback: logit(LOG_WARNING, "No %s found on system, using built-in server string.", file); return; } while (fgets(line, sizeof(line), fp)) { line[strlen(line) - 1] = 0; ptr = strstr(line, "DISTRIB_ID"); if (ptr && (ptr = strchr(ptr, '='))) os = strdup(++ptr); ptr = strstr(line, "DISTRIB_RELEASE"); if (ptr && (ptr = strchr(ptr, '='))) ver = strdup(++ptr); } fclose(fp); if (os && ver) snprintf(server_string, sizeof(server_string), "%s/%s UPnP/1.0 %s/%s", os, ver, PACKAGE_NAME, PACKAGE_VERSION); else goto fallback; logit(LOG_DEBUG, "Server: %s", server_string); } /* https://en.wikipedia.org/wiki/Universally_unique_identifier */ static void uuidgen(void) { FILE *fp; char buf[42]; const char *file = _PATH_VARDB PACKAGE_NAME ".cache"; fp = fopen(file, "r"); if (!fp) { fp = fopen(file, "w"); if (!fp) logit(LOG_WARNING, "Cannot create UUID cache, %s: %s", file, strerror(errno)); generate: srand(time(NULL)); snprintf(buf, sizeof(buf), "uuid:%8.8x-%4.4x-%4.4x-%4.4x-%6.6x%6.6x", rand() & 0xFFFFFFFF, rand() & 0xFFFF, (rand() & 0x0FFF) | 0x4000, /* M 4 MSB version => version 4 */ (rand() & 0x1FFF) | 0x8000, /* N: 3 MSB variant => variant 1 */ rand() & 0xFFFFFF, rand() & 0xFFFFFF); if (fp) { logit(LOG_DEBUG, "Creating new UUID cache file, %s", file); fprintf(fp, "%s\n", buf); fclose(fp); } } else { if (!fgets(buf, sizeof(buf), fp)) { fclose(fp); goto generate; } buf[strlen(buf) - 1] = 0; fclose(fp); } strcpy(uuid, buf); logit(LOG_DEBUG, "URN: %s", uuid); } static void 
exit_handler(int signo) { (void)signo; running = 0; } static void signal_init(void) { signal(SIGTERM, exit_handler); signal(SIGINT, exit_handler); signal(SIGHUP, exit_handler); signal(SIGQUIT, exit_handler); } static int usage(int code) { printf("Usage: %s [-dhv] [-i SEC] [IFACE [IFACE ...]]\n" "\n" " -d Developer debug mode\n" " -h This help text\n" " -i SEC SSDP notify interval (30-900), default %d sec\n" " -r SEC Interface refresh interval (5-1800), default %d sec\n" " -v Show program version\n" "\n" "Bug report address: %-40s\n", PACKAGE_NAME, NOTIFY_INTERVAL, REFRESH_INTERVAL, PACKAGE_BUGREPORT); return code; } int main(int argc, char *argv[]) { int i, c, sd; int log_level = LOG_NOTICE; int log_opts = LOG_CONS | LOG_PID; int interval = NOTIFY_INTERVAL; int refresh = REFRESH_INTERVAL; time_t now, rtmo = 0, itmo = 0; while ((c = getopt(argc, argv, "dhi:r:v")) != EOF) { switch (c) { case 'd': debug = 1; break; case 'h': return usage(0); case 'i': interval = atoi(optarg); if (interval < 30 || interval > 900) errx(1, "Invalid announcement interval (30-900)."); break; case 'r': refresh = atoi(optarg); if (refresh < 5 || refresh > 1800) errx(1, "Invalid refresh interval (5-1800)."); break; case 'v': puts(PACKAGE_VERSION); return 0; default: break; } } signal_init(); if (debug) { log_level = LOG_DEBUG; log_opts |= LOG_PERROR; } openlog(PACKAGE_NAME, log_opts, LOG_DAEMON); setlogmask(LOG_UPTO(log_level)); uuidgen(); lsb_init(); web_init(); sd = multicast_init(); if (sd < 0) err(1, "Failed creating multicast socket"); while (running) { now = time(NULL); if (rtmo <= now) { if (ssdp_init(sd, &argv[optind], argc - optind) > 0) announce(1); rtmo = now + refresh; } if (itmo <= now) { announce(0); itmo = now + interval; } wait_message(MIN(rtmo, itmo)); } closelog(); return close_socket(); } /** * Local Variables: * indent-tabs-mode: t * c-file-style: "linux" * End: */
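/*
 * Editor's note: the record above is the dataset's "bad" (pre-fix) sample.
 * Two details in ssdp_recv() are worth calling out: buf[len] = 0 writes one
 * byte past the buffer when recvfrom() fills it completely, and salen is
 * passed to recvfrom() without being initialized to the size of sa. The
 * sketch below shows the conventional safe receive pattern for text-based
 * datagrams; it is an illustration of that pattern, not the upstream fix.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void safe_dgram_recv(int sd)
{
        char buf[1024];
        struct sockaddr_storage sa;
        socklen_t salen = sizeof(sa);  /* in/out: must be set before the call */
        ssize_t len;

        /* Reserve one byte so the terminator always fits. */
        len = recvfrom(sd, buf, sizeof(buf) - 1, MSG_DONTWAIT,
                       (struct sockaddr *)&sa, &salen);
        if (len <= 0)
                return;

        buf[len] = 0;  /* now provably in bounds */
        printf("got %zd bytes: %.40s\n", len, buf);
}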
./CrossVul/dataset_final_sorted/CWE-119/c/bad_969_0
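/*
 * Editor's note on uuidgen() in the record above: the two masks are what
 * make the output read as a version-4 UUID. (r & 0x0FFF) | 0x4000 forces
 * the version nibble to 4, and (r & 0x1FFF) | 0x8000 fixes the top three
 * bits of the next group to 1-0-0, so its leading hex digit is 8 or 9
 * (inside the RFC 4122 variant-1 range of 8..b). A tiny check of both
 * masks, under hypothetical demo_* names:
 */
#include <stdio.h>

static unsigned demo_ver_nibble(unsigned r) { return (r & 0x0FFFu) | 0x4000u; }
static unsigned demo_var_bits(unsigned r)   { return (r & 0x1FFFu) | 0x8000u; }

int main(void)
{
        /* Worst case: all random bits set. */
        printf("%04x %04x\n",
               demo_ver_nibble(0xFFFFu),   /* prints 4fff: version stays 4 */
               demo_var_bits(0xFFFFu));    /* prints 9fff: variant digit 8..9 */
        return 0;
}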
crossvul-cpp_data_good_3686_0
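/*
 * Editor's note: the record below (a patched Linux UDF super.c) wraps every
 * multi-byte on-disk field in le16_to_cpu()/le32_to_cpu() before use,
 * because UDF/ECMA-167 stores integers little-endian regardless of host
 * byte order. A minimal portable sketch of the same conversion follows;
 * demo_le32_to_cpu is hypothetical and not the kernel's implementation.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_le32_to_cpu(const uint8_t *p)
{
        /* Assemble from explicit byte positions; correct on both little-
         * and big-endian hosts. */
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        const uint8_t disk[4] = { 0x78, 0x56, 0x34, 0x12 };  /* LE 0x12345678 */

        printf("0x%08x\n", demo_le32_to_cpu(disk));  /* prints 0x12345678 */
        return 0;
}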
/* * super.c * * PURPOSE * Super block routines for the OSTA-UDF(tm) filesystem. * * DESCRIPTION * OSTA-UDF(tm) = Optical Storage Technology Association * Universal Disk Format. * * This code is based on version 2.00 of the UDF specification, * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346]. * http://www.osta.org/ * http://www.ecma.ch/ * http://www.iso.org/ * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 2000 Stelias Computing Inc * * HISTORY * * 09/24/98 dgb changed to allow compiling outside of kernel, and * added some debugging. * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34 * 10/16/98 attempting some multi-session support * 10/17/98 added freespace count for "df" * 11/11/98 gr added novrs option * 11/26/98 dgb added fileset,anchor mount options * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced * vol descs. rewrote option handling based on isofs * 12/20/98 find the free space bitmap (if it exists) */ #include "udfdecl.h" #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/parser.h> #include <linux/stat.h> #include <linux/cdrom.h> #include <linux/nls.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/vmalloc.h> #include <linux/errno.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/bitmap.h> #include <linux/crc-itu-t.h> #include <linux/log2.h> #include <asm/byteorder.h> #include "udf_sb.h" #include "udf_i.h" #include <linux/init.h> #include <asm/uaccess.h> #define VDS_POS_PRIMARY_VOL_DESC 0 #define VDS_POS_UNALLOC_SPACE_DESC 1 #define VDS_POS_LOGICAL_VOL_DESC 2 #define VDS_POS_PARTITION_DESC 3 #define VDS_POS_IMP_USE_VOL_DESC 4 #define VDS_POS_VOL_DESC_PTR 5 #define VDS_POS_TERMINATING_DESC 6 #define VDS_POS_LENGTH 7 #define UDF_DEFAULT_BLOCKSIZE 2048 enum { UDF_MAX_LINKS = 0xffff }; /* These are the "meat" - everything else is stuffing */ static int udf_fill_super(struct super_block *, void *, int); static void udf_put_super(struct super_block *); static int udf_sync_fs(struct super_block *, int); static int udf_remount_fs(struct super_block *, int *, char *); static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad); static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *, struct kernel_lb_addr *); static void udf_load_fileset(struct super_block *, struct buffer_head *, struct kernel_lb_addr *); static void udf_open_lvid(struct super_block *); static void udf_close_lvid(struct super_block *); static unsigned int udf_count_free(struct super_block *); static int udf_statfs(struct dentry *, struct kstatfs *); static int udf_show_options(struct seq_file *, struct dentry *); struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) { struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); __u32 offset = number_of_partitions * 2 * sizeof(uint32_t)/sizeof(uint8_t); return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); } /* UDF filesystem type */ static struct dentry *udf_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, 
flags, dev_name, data, udf_fill_super); } static struct file_system_type udf_fstype = { .owner = THIS_MODULE, .name = "udf", .mount = udf_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static struct kmem_cache *udf_inode_cachep; static struct inode *udf_alloc_inode(struct super_block *sb) { struct udf_inode_info *ei; ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->i_unique = 0; ei->i_lenExtents = 0; ei->i_next_alloc_block = 0; ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; init_rwsem(&ei->i_data_sem); return &ei->vfs_inode; } static void udf_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } static void udf_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, udf_i_callback); } static void init_once(void *foo) { struct udf_inode_info *ei = (struct udf_inode_info *)foo; ei->i_ext.i_data = NULL; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { udf_inode_cachep = kmem_cache_create("udf_inode_cache", sizeof(struct udf_inode_info), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (!udf_inode_cachep) return -ENOMEM; return 0; } static void destroy_inodecache(void) { kmem_cache_destroy(udf_inode_cachep); } /* Superblock operations */ static const struct super_operations udf_sb_ops = { .alloc_inode = udf_alloc_inode, .destroy_inode = udf_destroy_inode, .write_inode = udf_write_inode, .evict_inode = udf_evict_inode, .put_super = udf_put_super, .sync_fs = udf_sync_fs, .statfs = udf_statfs, .remount_fs = udf_remount_fs, .show_options = udf_show_options, }; struct udf_options { unsigned char novrs; unsigned int blocksize; unsigned int session; unsigned int lastblock; unsigned int anchor; unsigned int volume; unsigned short partition; unsigned int fileset; unsigned int rootdir; unsigned int flags; umode_t umask; gid_t gid; uid_t uid; umode_t fmode; umode_t dmode; struct nls_table *nls_map; }; static int __init init_udf_fs(void) { int err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&udf_fstype); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_udf_fs(void) { unregister_filesystem(&udf_fstype); destroy_inodecache(); } module_init(init_udf_fs) module_exit(exit_udf_fs) static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) { struct udf_sb_info *sbi = UDF_SB(sb); sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map), GFP_KERNEL); if (!sbi->s_partmaps) { udf_err(sb, "Unable to allocate space for %d partition maps\n", count); sbi->s_partitions = 0; return -ENOMEM; } sbi->s_partitions = count; return 0; } static int udf_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct udf_sb_info *sbi = UDF_SB(sb); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) seq_puts(seq, ",nostrict"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET)) seq_printf(seq, ",bs=%lu", sb->s_blocksize); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) seq_puts(seq, ",unhide"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) seq_puts(seq, ",undelete"); if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB)) seq_puts(seq, ",noadinicb"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD)) seq_puts(seq, ",shortad"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET)) seq_puts(seq, ",uid=forget"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE)) seq_puts(seq, ",uid=ignore"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET)) seq_puts(seq, ",gid=forget"); if 
(UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE)) seq_puts(seq, ",gid=ignore"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) seq_printf(seq, ",uid=%u", sbi->s_uid); if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) seq_printf(seq, ",gid=%u", sbi->s_gid); if (sbi->s_umask != 0) seq_printf(seq, ",umask=%ho", sbi->s_umask); if (sbi->s_fmode != UDF_INVALID_MODE) seq_printf(seq, ",mode=%ho", sbi->s_fmode); if (sbi->s_dmode != UDF_INVALID_MODE) seq_printf(seq, ",dmode=%ho", sbi->s_dmode); if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET)) seq_printf(seq, ",session=%u", sbi->s_session); if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET)) seq_printf(seq, ",lastblock=%u", sbi->s_last_block); if (sbi->s_anchor != 0) seq_printf(seq, ",anchor=%u", sbi->s_anchor); /* * volume, partition, fileset and rootdir seem to be ignored * currently */ if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) seq_puts(seq, ",utf8"); if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map) seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset); return 0; } /* * udf_parse_options * * PURPOSE * Parse mount options. * * DESCRIPTION * The following mount options are supported: * * gid= Set the default group. * umask= Set the default umask. * mode= Set the default file permissions. * dmode= Set the default directory permissions. * uid= Set the default user. * bs= Set the block size. * unhide Show otherwise hidden files. * undelete Show deleted files in lists. * adinicb Embed data in the inode (default) * noadinicb Don't embed data in the inode * shortad Use short ad's * longad Use long ad's (default) * nostrict Unset strict conformance * iocharset= Set the NLS character set * * The remaining are for debugging and disaster recovery: * * novrs Skip volume sequence recognition * * The following expect a offset from 0. * * session= Set the CDROM session (default= last session) * anchor= Override standard anchor location. (default= 256) * volume= Override the VolumeDesc location. (unused) * partition= Override the PartitionDesc location. (unused) * lastblock= Set the last block of the filesystem/ * * The following expect a offset from the partition root. * * fileset= Override the fileset block location. (unused) * rootdir= Override the root directory location. (unused) * WARNING: overriding the rootdir to a non-directory may * yield highly unpredictable results. * * PRE-CONDITIONS * options Pointer to mount options string. * uopts Pointer to mount options variable. * * POST-CONDITIONS * <return> 1 Mount options parsed okay. * <return> 0 Error parsing mount options. * * HISTORY * July 1, 1997 - Andrew E. Mileski * Written, tested, and released. 
*/ enum { Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete, Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock, Opt_anchor, Opt_volume, Opt_partition, Opt_fileset, Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore, Opt_fmode, Opt_dmode }; static const match_table_t tokens = { {Opt_novrs, "novrs"}, {Opt_nostrict, "nostrict"}, {Opt_bs, "bs=%u"}, {Opt_unhide, "unhide"}, {Opt_undelete, "undelete"}, {Opt_noadinicb, "noadinicb"}, {Opt_adinicb, "adinicb"}, {Opt_shortad, "shortad"}, {Opt_longad, "longad"}, {Opt_uforget, "uid=forget"}, {Opt_uignore, "uid=ignore"}, {Opt_gforget, "gid=forget"}, {Opt_gignore, "gid=ignore"}, {Opt_gid, "gid=%u"}, {Opt_uid, "uid=%u"}, {Opt_umask, "umask=%o"}, {Opt_session, "session=%u"}, {Opt_lastblock, "lastblock=%u"}, {Opt_anchor, "anchor=%u"}, {Opt_volume, "volume=%u"}, {Opt_partition, "partition=%u"}, {Opt_fileset, "fileset=%u"}, {Opt_rootdir, "rootdir=%u"}, {Opt_utf8, "utf8"}, {Opt_iocharset, "iocharset=%s"}, {Opt_fmode, "mode=%o"}, {Opt_dmode, "dmode=%o"}, {Opt_err, NULL} }; static int udf_parse_options(char *options, struct udf_options *uopt, bool remount) { char *p; int option; uopt->novrs = 0; uopt->partition = 0xFFFF; uopt->session = 0xFFFFFFFF; uopt->lastblock = 0; uopt->anchor = 0; uopt->volume = 0xFFFFFFFF; uopt->rootdir = 0xFFFFFFFF; uopt->fileset = 0xFFFFFFFF; uopt->nls_map = NULL; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_novrs: uopt->novrs = 1; break; case Opt_bs: if (match_int(&args[0], &option)) return 0; uopt->blocksize = option; uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET); break; case Opt_unhide: uopt->flags |= (1 << UDF_FLAG_UNHIDE); break; case Opt_undelete: uopt->flags |= (1 << UDF_FLAG_UNDELETE); break; case Opt_noadinicb: uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB); break; case Opt_adinicb: uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB); break; case Opt_shortad: uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); break; case Opt_longad: uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); break; case Opt_gid: if (match_int(args, &option)) return 0; uopt->gid = option; uopt->flags |= (1 << UDF_FLAG_GID_SET); break; case Opt_uid: if (match_int(args, &option)) return 0; uopt->uid = option; uopt->flags |= (1 << UDF_FLAG_UID_SET); break; case Opt_umask: if (match_octal(args, &option)) return 0; uopt->umask = option; break; case Opt_nostrict: uopt->flags &= ~(1 << UDF_FLAG_STRICT); break; case Opt_session: if (match_int(args, &option)) return 0; uopt->session = option; if (!remount) uopt->flags |= (1 << UDF_FLAG_SESSION_SET); break; case Opt_lastblock: if (match_int(args, &option)) return 0; uopt->lastblock = option; if (!remount) uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); break; case Opt_anchor: if (match_int(args, &option)) return 0; uopt->anchor = option; break; case Opt_volume: if (match_int(args, &option)) return 0; uopt->volume = option; break; case Opt_partition: if (match_int(args, &option)) return 0; uopt->partition = option; break; case Opt_fileset: if (match_int(args, &option)) return 0; uopt->fileset = option; break; case Opt_rootdir: if (match_int(args, &option)) return 0; uopt->rootdir = option; break; case Opt_utf8: uopt->flags |= (1 << UDF_FLAG_UTF8); break; #ifdef CONFIG_UDF_NLS case Opt_iocharset: uopt->nls_map = load_nls(args[0].from); uopt->flags |= (1 << UDF_FLAG_NLS_MAP); 
break; #endif case Opt_uignore: uopt->flags |= (1 << UDF_FLAG_UID_IGNORE); break; case Opt_uforget: uopt->flags |= (1 << UDF_FLAG_UID_FORGET); break; case Opt_gignore: uopt->flags |= (1 << UDF_FLAG_GID_IGNORE); break; case Opt_gforget: uopt->flags |= (1 << UDF_FLAG_GID_FORGET); break; case Opt_fmode: if (match_octal(args, &option)) return 0; uopt->fmode = option & 0777; break; case Opt_dmode: if (match_octal(args, &option)) return 0; uopt->dmode = option & 0777; break; default: pr_err("bad mount option \"%s\" or missing value\n", p); return 0; } } return 1; } static int udf_remount_fs(struct super_block *sb, int *flags, char *options) { struct udf_options uopt; struct udf_sb_info *sbi = UDF_SB(sb); int error = 0; uopt.flags = sbi->s_flags; uopt.uid = sbi->s_uid; uopt.gid = sbi->s_gid; uopt.umask = sbi->s_umask; uopt.fmode = sbi->s_fmode; uopt.dmode = sbi->s_dmode; if (!udf_parse_options(options, &uopt, true)) return -EINVAL; write_lock(&sbi->s_cred_lock); sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; sbi->s_gid = uopt.gid; sbi->s_umask = uopt.umask; sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; write_unlock(&sbi->s_cred_lock); if (sbi->s_lvid_bh) { int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); if (write_rev > UDF_MAX_WRITE_VERSION) *flags |= MS_RDONLY; } if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) goto out_unlock; if (*flags & MS_RDONLY) udf_close_lvid(sb); else udf_open_lvid(sb); out_unlock: return error; } /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */ /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ static loff_t udf_check_vsd(struct super_block *sb) { struct volStructDesc *vsd = NULL; loff_t sector = 32768; int sectorsize; struct buffer_head *bh = NULL; int nsr02 = 0; int nsr03 = 0; struct udf_sb_info *sbi; sbi = UDF_SB(sb); if (sb->s_blocksize < sizeof(struct volStructDesc)) sectorsize = sizeof(struct volStructDesc); else sectorsize = sb->s_blocksize; sector += (sbi->s_session << sb->s_blocksize_bits); udf_debug("Starting at sector %u (%ld byte sectors)\n", (unsigned int)(sector >> sb->s_blocksize_bits), sb->s_blocksize); /* Process the sequence (if applicable) */ for (; !nsr02 && !nsr03; sector += sectorsize) { /* Read a block */ bh = udf_tread(sb, sector >> sb->s_blocksize_bits); if (!bh) break; /* Look for ISO descriptors */ vsd = (struct volStructDesc *)(bh->b_data + (sector & (sb->s_blocksize - 1))); if (vsd->stdIdent[0] == 0) { brelse(bh); break; } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) { switch (vsd->structType) { case 0: udf_debug("ISO9660 Boot Record found\n"); break; case 1: udf_debug("ISO9660 Primary Volume Descriptor found\n"); break; case 2: udf_debug("ISO9660 Supplementary Volume Descriptor found\n"); break; case 3: udf_debug("ISO9660 Volume Partition Descriptor found\n"); break; case 255: udf_debug("ISO9660 Volume Descriptor Set Terminator found\n"); break; default: udf_debug("ISO9660 VRS (%u) found\n", vsd->structType); break; } } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) ; /* nothing */ else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) { brelse(bh); break; } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) nsr02 = sector; else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) nsr03 = sector; brelse(bh); } if (nsr03) return nsr03; else if (nsr02) return nsr02; else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768) return -1; else return 0; } static int udf_find_fileset(struct 
super_block *sb, struct kernel_lb_addr *fileset, struct kernel_lb_addr *root) { struct buffer_head *bh = NULL; long lastblock; uint16_t ident; struct udf_sb_info *sbi; if (fileset->logicalBlockNum != 0xFFFFFFFF || fileset->partitionReferenceNum != 0xFFFF) { bh = udf_read_ptagged(sb, fileset, 0, &ident); if (!bh) { return 1; } else if (ident != TAG_IDENT_FSD) { brelse(bh); return 1; } } sbi = UDF_SB(sb); if (!bh) { /* Search backwards through the partitions */ struct kernel_lb_addr newfileset; /* --> cvg: FIXME - is it reasonable? */ return 1; for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; (newfileset.partitionReferenceNum != 0xFFFF && fileset->logicalBlockNum == 0xFFFFFFFF && fileset->partitionReferenceNum == 0xFFFF); newfileset.partitionReferenceNum--) { lastblock = sbi->s_partmaps [newfileset.partitionReferenceNum] .s_partition_len; newfileset.logicalBlockNum = 0; do { bh = udf_read_ptagged(sb, &newfileset, 0, &ident); if (!bh) { newfileset.logicalBlockNum++; continue; } switch (ident) { case TAG_IDENT_SBD: { struct spaceBitmapDesc *sp; sp = (struct spaceBitmapDesc *) bh->b_data; newfileset.logicalBlockNum += 1 + ((le32_to_cpu(sp->numOfBytes) + sizeof(struct spaceBitmapDesc) - 1) >> sb->s_blocksize_bits); brelse(bh); break; } case TAG_IDENT_FSD: *fileset = newfileset; break; default: newfileset.logicalBlockNum++; brelse(bh); bh = NULL; break; } } while (newfileset.logicalBlockNum < lastblock && fileset->logicalBlockNum == 0xFFFFFFFF && fileset->partitionReferenceNum == 0xFFFF); } } if ((fileset->logicalBlockNum != 0xFFFFFFFF || fileset->partitionReferenceNum != 0xFFFF) && bh) { udf_debug("Fileset at block=%d, partition=%d\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); sbi->s_partition = fileset->partitionReferenceNum; udf_load_fileset(sb, bh, root); brelse(bh); return 0; } return 1; } static int udf_load_pvoldesc(struct super_block *sb, sector_t block) { struct primaryVolDesc *pvoldesc; struct ustr *instr, *outstr; struct buffer_head *bh; uint16_t ident; int ret = 1; instr = kmalloc(sizeof(struct ustr), GFP_NOFS); if (!instr) return 1; outstr = kmalloc(sizeof(struct ustr), GFP_NOFS); if (!outstr) goto out1; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) goto out2; BUG_ON(ident != TAG_IDENT_PVD); pvoldesc = (struct primaryVolDesc *)bh->b_data; if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time, pvoldesc->recordingDateAndTime)) { #ifdef UDFFS_DEBUG struct timestamp *ts = &pvoldesc->recordingDateAndTime; udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n", le16_to_cpu(ts->year), ts->month, ts->day, ts->hour, ts->minute, le16_to_cpu(ts->typeAndTimezone)); #endif } if (!udf_build_ustr(instr, pvoldesc->volIdent, 32)) if (udf_CS0toUTF8(outstr, instr)) { strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name, outstr->u_len > 31 ? 
31 : outstr->u_len); udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); } if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128)) if (udf_CS0toUTF8(outstr, instr)) udf_debug("volSetIdent[] = '%s'\n", outstr->u_name); brelse(bh); ret = 0; out2: kfree(outstr); out1: kfree(instr); return ret; } struct inode *udf_find_metadata_inode_efe(struct super_block *sb, u32 meta_file_loc, u32 partition_num) { struct kernel_lb_addr addr; struct inode *metadata_fe; addr.logicalBlockNum = meta_file_loc; addr.partitionReferenceNum = partition_num; metadata_fe = udf_iget(sb, &addr); if (metadata_fe == NULL) udf_warn(sb, "metadata inode efe not found\n"); else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) { udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n"); iput(metadata_fe); metadata_fe = NULL; } return metadata_fe; } static int udf_load_metadata_files(struct super_block *sb, int partition) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_meta_data *mdata; struct kernel_lb_addr addr; map = &sbi->s_partmaps[partition]; mdata = &map->s_type_specific.s_metadata; /* metadata address */ udf_debug("Metadata file location: block = %d part = %d\n", mdata->s_meta_file_loc, map->s_partition_num); mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, map->s_partition_num); if (mdata->s_metadata_fe == NULL) { /* mirror file entry */ udf_debug("Mirror metadata file location: block = %d part = %d\n", mdata->s_mirror_file_loc, map->s_partition_num); mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, map->s_partition_num); if (mdata->s_mirror_fe == NULL) { udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); goto error_exit; } } /* * bitmap file entry * Note: * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102) */ if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { addr.logicalBlockNum = mdata->s_bitmap_file_loc; addr.partitionReferenceNum = map->s_partition_num; udf_debug("Bitmap file location: block = %d part = %d\n", addr.logicalBlockNum, addr.partitionReferenceNum); mdata->s_bitmap_fe = udf_iget(sb, &addr); if (mdata->s_bitmap_fe == NULL) { if (sb->s_flags & MS_RDONLY) udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); else { udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); goto error_exit; } } } udf_debug("udf_load_metadata_files Ok\n"); return 0; error_exit: return 1; } static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, struct kernel_lb_addr *root) { struct fileSetDesc *fset; fset = (struct fileSetDesc *)bh->b_data; *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum); udf_debug("Rootdir at block=%d, partition=%d\n", root->logicalBlockNum, root->partitionReferenceNum); } int udf_compute_nr_groups(struct super_block *sb, u32 partition) { struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; return DIV_ROUND_UP(map->s_partition_len + (sizeof(struct spaceBitmapDesc) << 3), sb->s_blocksize * 8); } static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) { struct udf_bitmap *bitmap; int nr_groups; int size; nr_groups = udf_compute_nr_groups(sb, index); size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups); if (size <= PAGE_SIZE) bitmap = kzalloc(size, GFP_KERNEL); else bitmap = vzalloc(size); /* TODO: get rid of vzalloc */ if 
(bitmap == NULL) return NULL; bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1); bitmap->s_nr_groups = nr_groups; return bitmap; } static int udf_fill_partdesc_info(struct super_block *sb, struct partitionDesc *p, int p_index) { struct udf_part_map *map; struct udf_sb_info *sbi = UDF_SB(sb); struct partitionHeaderDesc *phd; map = &sbi->s_partmaps[p_index]; map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE)) map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE)) map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE; if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE)) map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE; udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n", p_index, map->s_partition_type, map->s_partition_root, map->s_partition_len); if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) && strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) return 0; phd = (struct partitionHeaderDesc *)p->partitionContentsUse; if (phd->unallocSpaceTable.extLength) { struct kernel_lb_addr loc = { .logicalBlockNum = le32_to_cpu( phd->unallocSpaceTable.extPosition), .partitionReferenceNum = p_index, }; map->s_uspace.s_table = udf_iget(sb, &loc); if (!map->s_uspace.s_table) { udf_debug("cannot load unallocSpaceTable (part %d)\n", p_index); return 1; } map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; udf_debug("unallocSpaceTable (part %d) @ %ld\n", p_index, map->s_uspace.s_table->i_ino); } if (phd->unallocSpaceBitmap.extLength) { struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); if (!bitmap) return 1; map->s_uspace.s_bitmap = bitmap; bitmap->s_extLength = le32_to_cpu( phd->unallocSpaceBitmap.extLength); bitmap->s_extPosition = le32_to_cpu( phd->unallocSpaceBitmap.extPosition); map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; udf_debug("unallocSpaceBitmap (part %d) @ %d\n", p_index, bitmap->s_extPosition); } if (phd->partitionIntegrityTable.extLength) udf_debug("partitionIntegrityTable (part %d)\n", p_index); if (phd->freedSpaceTable.extLength) { struct kernel_lb_addr loc = { .logicalBlockNum = le32_to_cpu( phd->freedSpaceTable.extPosition), .partitionReferenceNum = p_index, }; map->s_fspace.s_table = udf_iget(sb, &loc); if (!map->s_fspace.s_table) { udf_debug("cannot load freedSpaceTable (part %d)\n", p_index); return 1; } map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE; udf_debug("freedSpaceTable (part %d) @ %ld\n", p_index, map->s_fspace.s_table->i_ino); } if (phd->freedSpaceBitmap.extLength) { struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); if (!bitmap) return 1; map->s_fspace.s_bitmap = bitmap; bitmap->s_extLength = le32_to_cpu( phd->freedSpaceBitmap.extLength); bitmap->s_extPosition = le32_to_cpu( phd->freedSpaceBitmap.extPosition); map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP; udf_debug("freedSpaceBitmap (part %d) @ %d\n", p_index, bitmap->s_extPosition); } return 0; } static void udf_find_vat_block(struct super_block *sb, int p_index, int type1_index, sector_t start_block) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map = &sbi->s_partmaps[p_index]; sector_t vat_block; struct kernel_lb_addr ino; /* * VAT file entry is in the last recorded block. 
Some broken disks have * it a few blocks before so try a bit harder... */ ino.partitionReferenceNum = type1_index; for (vat_block = start_block; vat_block >= map->s_partition_root && vat_block >= start_block - 3 && !sbi->s_vat_inode; vat_block--) { ino.logicalBlockNum = vat_block - map->s_partition_root; sbi->s_vat_inode = udf_iget(sb, &ino); } } static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map = &sbi->s_partmaps[p_index]; struct buffer_head *bh = NULL; struct udf_inode_info *vati; uint32_t pos; struct virtualAllocationTable20 *vat20; sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); if (!sbi->s_vat_inode && sbi->s_last_block != blocks - 1) { pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n", (unsigned long)sbi->s_last_block, (unsigned long)blocks - 1); udf_find_vat_block(sb, p_index, type1_index, blocks - 1); } if (!sbi->s_vat_inode) return 1; if (map->s_partition_type == UDF_VIRTUAL_MAP15) { map->s_type_specific.s_virtual.s_start_offset = 0; map->s_type_specific.s_virtual.s_num_entries = (sbi->s_vat_inode->i_size - 36) >> 2; } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { vati = UDF_I(sbi->s_vat_inode); if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { pos = udf_block_map(sbi->s_vat_inode, 0); bh = sb_bread(sb, pos); if (!bh) return 1; vat20 = (struct virtualAllocationTable20 *)bh->b_data; } else { vat20 = (struct virtualAllocationTable20 *) vati->i_ext.i_data; } map->s_type_specific.s_virtual.s_start_offset = le16_to_cpu(vat20->lengthHeader); map->s_type_specific.s_virtual.s_num_entries = (sbi->s_vat_inode->i_size - map->s_type_specific.s_virtual. 
s_start_offset) >> 2; brelse(bh); } return 0; } static int udf_load_partdesc(struct super_block *sb, sector_t block) { struct buffer_head *bh; struct partitionDesc *p; struct udf_part_map *map; struct udf_sb_info *sbi = UDF_SB(sb); int i, type1_idx; uint16_t partitionNumber; uint16_t ident; int ret = 0; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return 1; if (ident != TAG_IDENT_PD) goto out_bh; p = (struct partitionDesc *)bh->b_data; partitionNumber = le16_to_cpu(p->partitionNumber); /* First scan for TYPE1, SPARABLE and METADATA partitions */ for (i = 0; i < sbi->s_partitions; i++) { map = &sbi->s_partmaps[i]; udf_debug("Searching map: (%d == %d)\n", map->s_partition_num, partitionNumber); if (map->s_partition_num == partitionNumber && (map->s_partition_type == UDF_TYPE1_MAP15 || map->s_partition_type == UDF_SPARABLE_MAP15)) break; } if (i >= sbi->s_partitions) { udf_debug("Partition (%d) not found in partition map\n", partitionNumber); goto out_bh; } ret = udf_fill_partdesc_info(sb, p, i); /* * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and * PHYSICAL partitions are already set up */ type1_idx = i; for (i = 0; i < sbi->s_partitions; i++) { map = &sbi->s_partmaps[i]; if (map->s_partition_num == partitionNumber && (map->s_partition_type == UDF_VIRTUAL_MAP15 || map->s_partition_type == UDF_VIRTUAL_MAP20 || map->s_partition_type == UDF_METADATA_MAP25)) break; } if (i >= sbi->s_partitions) goto out_bh; ret = udf_fill_partdesc_info(sb, p, i); if (ret) goto out_bh; if (map->s_partition_type == UDF_METADATA_MAP25) { ret = udf_load_metadata_files(sb, i); if (ret) { udf_err(sb, "error loading MetaData partition map %d\n", i); goto out_bh; } } else { ret = udf_load_vat(sb, i, type1_idx); if (ret) goto out_bh; /* * Mark filesystem read-only if we have a partition with * virtual map since we don't handle writing to it (we * overwrite blocks instead of relocating them). 
*/ sb->s_flags |= MS_RDONLY; pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n"); } out_bh: /* In case loading failed, we handle cleanup in udf_fill_super */ brelse(bh); return ret; } static int udf_load_sparable_map(struct super_block *sb, struct udf_part_map *map, struct sparablePartitionMap *spm) { uint32_t loc; uint16_t ident; struct sparingTable *st; struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; int i; struct buffer_head *bh; map->s_partition_type = UDF_SPARABLE_MAP15; sdata->s_packet_len = le16_to_cpu(spm->packetLength); if (!is_power_of_2(sdata->s_packet_len)) { udf_err(sb, "error loading logical volume descriptor: " "Invalid packet length %u\n", (unsigned)sdata->s_packet_len); return -EIO; } if (spm->numSparingTables > 4) { udf_err(sb, "error loading logical volume descriptor: " "Too many sparing tables (%d)\n", (int)spm->numSparingTables); return -EIO; } for (i = 0; i < spm->numSparingTables; i++) { loc = le32_to_cpu(spm->locSparingTable[i]); bh = udf_read_tagged(sb, loc, loc, &ident); if (!bh) continue; st = (struct sparingTable *)bh->b_data; if (ident != 0 || strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING)) || sizeof(*st) + le16_to_cpu(st->reallocationTableLen) > sb->s_blocksize) { brelse(bh); continue; } sdata->s_spar_map[i] = bh; } map->s_partition_func = udf_get_pblock_spar15; return 0; } static int udf_load_logicalvol(struct super_block *sb, sector_t block, struct kernel_lb_addr *fileset) { struct logicalVolDesc *lvd; int i, offset; uint8_t type; struct udf_sb_info *sbi = UDF_SB(sb); struct genericPartitionMap *gpm; uint16_t ident; struct buffer_head *bh; unsigned int table_len; int ret = 0; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return 1; BUG_ON(ident != TAG_IDENT_LVD); lvd = (struct logicalVolDesc *)bh->b_data; table_len = le32_to_cpu(lvd->mapTableLength); if (table_len > sb->s_blocksize - sizeof(*lvd)) { udf_err(sb, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); ret = 1; /* a corrupted descriptor must fail the mount, not silently succeed */ goto out_bh; } ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); if (ret) goto out_bh; for (i = 0, offset = 0; i < sbi->s_partitions && offset < table_len; i++, offset += gpm->partitionMapLength) { struct udf_part_map *map = &sbi->s_partmaps[i]; gpm = (struct genericPartitionMap *) &(lvd->partitionMaps[offset]); type = gpm->partitionMapType; if (type == 1) { struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)gpm; map->s_partition_type = UDF_TYPE1_MAP15; map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); map->s_partition_num = le16_to_cpu(gpm1->partitionNum); map->s_partition_func = NULL; } else if (type == 2) { struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)gpm; if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) { u16 suf = le16_to_cpu(((__le16 *)upm2->partIdent.
identSuffix)[0]); if (suf < 0x0200) { map->s_partition_type = UDF_VIRTUAL_MAP15; map->s_partition_func = udf_get_pblock_virt15; } else { map->s_partition_type = UDF_VIRTUAL_MAP20; map->s_partition_func = udf_get_pblock_virt20; } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { if (udf_load_sparable_map(sb, map, (struct sparablePartitionMap *)gpm) < 0) { ret = 1; /* propagate the failure instead of returning success */ goto out_bh; } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { struct udf_meta_data *mdata = &map->s_type_specific.s_metadata; struct metadataPartitionMap *mdm = (struct metadataPartitionMap *) &(lvd->partitionMaps[offset]); udf_debug("Parsing Logical vol part %d type %d id=%s\n", i, type, UDF_ID_METADATA); map->s_partition_type = UDF_METADATA_MAP25; map->s_partition_func = udf_get_pblock_meta25; mdata->s_meta_file_loc = le32_to_cpu(mdm->metadataFileLoc); mdata->s_mirror_file_loc = le32_to_cpu(mdm->metadataMirrorFileLoc); mdata->s_bitmap_file_loc = le32_to_cpu(mdm->metadataBitmapFileLoc); mdata->s_alloc_unit_size = le32_to_cpu(mdm->allocUnitSize); mdata->s_align_unit_size = le16_to_cpu(mdm->alignUnitSize); if (mdm->flags & 0x01) mdata->s_flags |= MF_DUPLICATE_MD; udf_debug("Metadata Ident suffix=0x%x\n", le16_to_cpu(*(__le16 *) mdm->partIdent.identSuffix)); udf_debug("Metadata part num=%d\n", le16_to_cpu(mdm->partitionNum)); udf_debug("Metadata part alloc unit size=%d\n", le32_to_cpu(mdm->allocUnitSize)); udf_debug("Metadata file loc=%d\n", le32_to_cpu(mdm->metadataFileLoc)); udf_debug("Mirror file loc=%d\n", le32_to_cpu(mdm->metadataMirrorFileLoc)); udf_debug("Bitmap file loc=%d\n", le32_to_cpu(mdm->metadataBitmapFileLoc)); udf_debug("Flags: %d %d\n", mdata->s_flags, mdm->flags); } else { udf_debug("Unknown ident: %s\n", upm2->partIdent.ident); continue; } map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); map->s_partition_num = le16_to_cpu(upm2->partitionNum); } udf_debug("Partition (%d:%d) type %d on volume %d\n", i, map->s_partition_num, type, map->s_volumeseqnum); } if (fileset) { struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]); *fileset = lelb_to_cpu(la->extLocation); udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); } if (lvd->integritySeqExt.extLength) udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); out_bh: brelse(bh); return ret; } /* * udf_load_logicalvolint * */ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc) { struct buffer_head *bh = NULL; uint16_t ident; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; while (loc.extLength > 0 && (bh = udf_read_tagged(sb, loc.extLocation, loc.extLocation, &ident)) && ident == TAG_IDENT_LVID) { sbi->s_lvid_bh = bh; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; if (lvid->nextIntegrityExt.extLength) udf_load_logicalvolint(sb, leea_to_cpu(lvid->nextIntegrityExt)); if (sbi->s_lvid_bh != bh) brelse(bh); loc.extLength -= sb->s_blocksize; loc.extLocation++; } if (sbi->s_lvid_bh != bh) brelse(bh); } /* * udf_process_sequence * * PURPOSE * Process a main/reserve volume descriptor sequence. * * PRE-CONDITIONS * sb Pointer to _locked_ superblock. * block First block of first extent of the sequence. * lastblock Lastblock of first extent of the sequence. * * HISTORY * July 1, 1997 - Andrew E. Mileski * Written, tested, and released.
*/ static noinline int udf_process_sequence(struct super_block *sb, long block, long lastblock, struct kernel_lb_addr *fileset) { struct buffer_head *bh = NULL; struct udf_vds_record vds[VDS_POS_LENGTH]; struct udf_vds_record *curr; struct generic_desc *gd; struct volDescPtr *vdp; int done = 0; uint32_t vdsn; uint16_t ident; long next_s = 0, next_e = 0; memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); /* * Read the main descriptor sequence and find which descriptors * are in it. */ for (; (!done && block <= lastblock); block++) { bh = udf_read_tagged(sb, block, block, &ident); if (!bh) { udf_err(sb, "Block %llu of volume descriptor sequence is corrupted or we could not read it\n", (unsigned long long)block); return 1; } /* Process each descriptor (ISO 13346 3/8.3-8.4) */ gd = (struct generic_desc *)bh->b_data; vdsn = le32_to_cpu(gd->volDescSeqNum); switch (ident) { case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; } break; case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ curr = &vds[VDS_POS_VOL_DESC_PTR]; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; vdp = (struct volDescPtr *)bh->b_data; next_s = le32_to_cpu( vdp->nextVolDescSeqExt.extLocation); next_e = le32_to_cpu( vdp->nextVolDescSeqExt.extLength); next_e = next_e >> sb->s_blocksize_bits; next_e += next_s; } break; case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; } break; case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ curr = &vds[VDS_POS_PARTITION_DESC]; if (!curr->block) curr->block = block; break; case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ curr = &vds[VDS_POS_LOGICAL_VOL_DESC]; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; } break; case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ curr = &vds[VDS_POS_UNALLOC_SPACE_DESC]; if (vdsn >= curr->volDescSeqNum) { curr->volDescSeqNum = vdsn; curr->block = block; } break; case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ vds[VDS_POS_TERMINATING_DESC].block = block; if (next_e) { block = next_s; lastblock = next_e; next_s = next_e = 0; } else done = 1; break; } brelse(bh); } /* * Now read interesting descriptors again and process them * in a suitable order */ if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) { udf_err(sb, "Primary Volume Descriptor not found!\n"); return 1; } if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block)) return 1; if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb, vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset)) return 1; if (vds[VDS_POS_PARTITION_DESC].block) { /* * We rescan the whole descriptor sequence to find * partition descriptor blocks and process them. 
*/ for (block = vds[VDS_POS_PARTITION_DESC].block; block < vds[VDS_POS_TERMINATING_DESC].block; block++) if (udf_load_partdesc(sb, block)) return 1; } return 0; } static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh, struct kernel_lb_addr *fileset) { struct anchorVolDescPtr *anchor; long main_s, main_e, reserve_s, reserve_e; anchor = (struct anchorVolDescPtr *)bh->b_data; /* Locate the main sequence */ main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); main_e = main_e >> sb->s_blocksize_bits; main_e += main_s; /* Locate the reserve sequence */ reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); reserve_e = reserve_e >> sb->s_blocksize_bits; reserve_e += reserve_s; /* Process the main & reserve sequences */ /* responsible for finding the PartitionDesc(s) */ if (!udf_process_sequence(sb, main_s, main_e, fileset)) return 1; return !udf_process_sequence(sb, reserve_s, reserve_e, fileset); } /* * Check whether there is an anchor block in the given block and * load Volume Descriptor Sequence if so. */ static int udf_check_anchor_block(struct super_block *sb, sector_t block, struct kernel_lb_addr *fileset) { struct buffer_head *bh; uint16_t ident; int ret; if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) && udf_fixed_to_variable(block) >= sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits) return 0; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return 0; if (ident != TAG_IDENT_AVDP) { brelse(bh); return 0; } ret = udf_load_sequence(sb, bh, fileset); brelse(bh); return ret; } /* Search for an anchor volume descriptor pointer */ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock, struct kernel_lb_addr *fileset) { sector_t last[6]; int i; struct udf_sb_info *sbi = UDF_SB(sb); int last_count = 0; /* First try user provided anchor */ if (sbi->s_anchor) { if (udf_check_anchor_block(sb, sbi->s_anchor, fileset)) return lastblock; } /* * according to spec, anchor is in either: * block 256 * lastblock-256 * lastblock * however, if the disc isn't closed, it could be 512. */ if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset)) return lastblock; /* * The trouble is which block is the last one. Drives often misreport * this so we try various possibilities. */ last[last_count++] = lastblock; if (lastblock >= 1) last[last_count++] = lastblock - 1; last[last_count++] = lastblock + 1; if (lastblock >= 2) last[last_count++] = lastblock - 2; if (lastblock >= 150) last[last_count++] = lastblock - 150; if (lastblock >= 152) last[last_count++] = lastblock - 152; for (i = 0; i < last_count; i++) { if (last[i] >= sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits) continue; if (udf_check_anchor_block(sb, last[i], fileset)) return last[i]; if (last[i] < 256) continue; if (udf_check_anchor_block(sb, last[i] - 256, fileset)) return last[i]; } /* Finally try block 512 in case media is open */ if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset)) return last[0]; return 0; } /* * Find an anchor volume descriptor and load Volume Descriptor Sequence from * area specified by it. The function expects sbi->s_lastblock to be the last * block on the media. * * Return 1 if ok, 0 if not found. 
* */ static int udf_find_anchor(struct super_block *sb, struct kernel_lb_addr *fileset) { sector_t lastblock; struct udf_sb_info *sbi = UDF_SB(sb); lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset); if (lastblock) goto out; /* No anchor found? Try VARCONV conversion of block numbers */ UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); /* Firstly, we try to not convert number of the last block */ lastblock = udf_scan_anchors(sb, udf_variable_to_fixed(sbi->s_last_block), fileset); if (lastblock) goto out; /* Secondly, we try with converted number of the last block */ lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset); if (!lastblock) { /* VARCONV didn't help. Clear it. */ UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV); return 0; } out: sbi->s_last_block = lastblock; return 1; } /* * Check Volume Structure Descriptor, find Anchor block and load Volume * Descriptor Sequence */ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, int silent, struct kernel_lb_addr *fileset) { struct udf_sb_info *sbi = UDF_SB(sb); loff_t nsr_off; if (!sb_set_blocksize(sb, uopt->blocksize)) { if (!silent) udf_warn(sb, "Bad block size\n"); return 0; } sbi->s_last_block = uopt->lastblock; if (!uopt->novrs) { /* Check that it is NSR02 compliant */ nsr_off = udf_check_vsd(sb); if (!nsr_off) { if (!silent) udf_warn(sb, "No VRS found\n"); return 0; } if (nsr_off == -1) udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n"); if (!sbi->s_last_block) sbi->s_last_block = udf_get_last_block(sb); } else { udf_debug("Validity check skipped because of novrs option\n"); } /* Look for anchor block and load Volume Descriptor Sequence */ sbi->s_anchor = uopt->anchor; if (!udf_find_anchor(sb, fileset)) { if (!silent) udf_warn(sb, "No anchor found\n"); return 0; } return 1; } static void udf_open_lvid(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; struct logicalVolIntegrityDescImpUse *lvidiu; if (!bh) return; mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sbi); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); lvid->descTag.descCRC = cpu_to_le16( crc_itu_t(0, (char *)lvid + sizeof(struct tag), le16_to_cpu(lvid->descTag.descCRCLength))); lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; mutex_unlock(&sbi->s_alloc_mutex); } static void udf_close_lvid(struct super_block *sb) { struct udf_sb_info *sbi = UDF_SB(sb); struct buffer_head *bh = sbi->s_lvid_bh; struct logicalVolIntegrityDesc *lvid; struct logicalVolIntegrityDescImpUse *lvidiu; if (!bh) return; mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sbi); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); lvid->integrityType = 
cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); lvid->descTag.descCRC = cpu_to_le16( crc_itu_t(0, (char *)lvid + sizeof(struct tag), le16_to_cpu(lvid->descTag.descCRCLength))); lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); /* * We set buffer uptodate unconditionally here to avoid spurious * warnings from mark_buffer_dirty() when previous EIO has marked * the buffer as !uptodate */ set_buffer_uptodate(bh); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; mutex_unlock(&sbi->s_alloc_mutex); } u64 lvid_get_unique_id(struct super_block *sb) { struct buffer_head *bh; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; struct logicalVolHeaderDesc *lvhd; u64 uniqueID; u64 ret; bh = sbi->s_lvid_bh; if (!bh) return 0; lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse; mutex_lock(&sbi->s_alloc_mutex); ret = uniqueID = le64_to_cpu(lvhd->uniqueID); if (!(++uniqueID & 0xFFFFFFFF)) uniqueID += 16; lvhd->uniqueID = cpu_to_le64(uniqueID); mutex_unlock(&sbi->s_alloc_mutex); mark_buffer_dirty(bh); return ret; } static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) { int i; int nr_groups = bitmap->s_nr_groups; int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups); for (i = 0; i < nr_groups; i++) if (bitmap->s_block_bitmap[i]) brelse(bitmap->s_block_bitmap[i]); if (size <= PAGE_SIZE) kfree(bitmap); else vfree(bitmap); } static void udf_free_partition(struct udf_part_map *map) { int i; struct udf_meta_data *mdata; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) iput(map->s_uspace.s_table); if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) iput(map->s_fspace.s_table); if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) udf_sb_free_bitmap(map->s_uspace.s_bitmap); if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) udf_sb_free_bitmap(map->s_fspace.s_bitmap); if (map->s_partition_type == UDF_SPARABLE_MAP15) for (i = 0; i < 4; i++) brelse(map->s_type_specific.s_sparing.s_spar_map[i]); else if (map->s_partition_type == UDF_METADATA_MAP25) { mdata = &map->s_type_specific.s_metadata; iput(mdata->s_metadata_fe); mdata->s_metadata_fe = NULL; iput(mdata->s_mirror_fe); mdata->s_mirror_fe = NULL; iput(mdata->s_bitmap_fe); mdata->s_bitmap_fe = NULL; } } static int udf_fill_super(struct super_block *sb, void *options, int silent) { int i; int ret; struct inode *inode = NULL; struct udf_options uopt; struct kernel_lb_addr rootdir, fileset; struct udf_sb_info *sbi; uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); uopt.uid = -1; uopt.gid = -1; uopt.umask = 0; uopt.fmode = UDF_INVALID_MODE; uopt.dmode = UDF_INVALID_MODE; sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; mutex_init(&sbi->s_alloc_mutex); if (!udf_parse_options((char *)options, &uopt, false)) goto error_out; if (uopt.flags & (1 << UDF_FLAG_UTF8) && uopt.flags & (1 << UDF_FLAG_NLS_MAP)) { udf_err(sb, "utf8 cannot be combined with iocharset\n"); goto error_out; } #ifdef CONFIG_UDF_NLS if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) { uopt.nls_map = load_nls_default(); if (!uopt.nls_map) uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP); else udf_debug("Using default NLS map\n"); } #endif if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP))) uopt.flags |= (1 << UDF_FLAG_UTF8); fileset.logicalBlockNum = 0xFFFFFFFF; fileset.partitionReferenceNum = 0xFFFF; sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; sbi->s_gid = uopt.gid; sbi->s_umask = 
uopt.umask; sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; sbi->s_nls_map = uopt.nls_map; rwlock_init(&sbi->s_cred_lock); if (uopt.session == 0xFFFFFFFF) sbi->s_session = udf_get_last_session(sb); else sbi->s_session = uopt.session; udf_debug("Multi-session=%d\n", sbi->s_session); /* Fill in the rest of the superblock */ sb->s_op = &udf_sb_ops; sb->s_export_op = &udf_export_ops; sb->s_dirt = 0; sb->s_magic = UDF_SUPER_MAGIC; sb->s_time_gran = 1000; if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { ret = udf_load_vrs(sb, &uopt, silent, &fileset); } else { uopt.blocksize = bdev_logical_block_size(sb->s_bdev); ret = udf_load_vrs(sb, &uopt, silent, &fileset); if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { if (!silent) pr_notice("Rescanning with blocksize %d\n", UDF_DEFAULT_BLOCKSIZE); uopt.blocksize = UDF_DEFAULT_BLOCKSIZE; ret = udf_load_vrs(sb, &uopt, silent, &fileset); } } if (!ret) { udf_warn(sb, "No partition found (1)\n"); goto error_out; } udf_debug("Lastblock=%d\n", sbi->s_last_block); if (sbi->s_lvid_bh) { struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sbi); uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); /* uint16_t maxUDFWriteRev = le16_to_cpu(lvidiu->maxUDFWriteRev); */ if (minUDFReadRev > UDF_MAX_READ_VERSION) { udf_err(sb, "minUDFReadRev=%x (max is %x)\n", le16_to_cpu(lvidiu->minUDFReadRev), UDF_MAX_READ_VERSION); goto error_out; } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) sb->s_flags |= MS_RDONLY; sbi->s_udfrev = minUDFWriteRev; if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); if (minUDFReadRev >= UDF_VERS_USE_STREAMS) UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); } if (!sbi->s_partitions) { udf_warn(sb, "No partition found (2)\n"); goto error_out; } if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & UDF_PART_FLAG_READ_ONLY) { pr_notice("Partition marked readonly; forcing readonly mount\n"); sb->s_flags |= MS_RDONLY; } if (udf_find_fileset(sb, &fileset, &rootdir)) { udf_warn(sb, "No fileset found\n"); goto error_out; } if (!silent) { struct timestamp ts; udf_time_to_disk_stamp(&ts, sbi->s_record_time); udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n", sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day, ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone)); } if (!(sb->s_flags & MS_RDONLY)) udf_open_lvid(sb); /* Assign the root inode */ /* assign inodes by physical block number */ /* perhaps it's not extensible enough, but for now ... */ inode = udf_iget(sb, &rootdir); if (!inode) { udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n", rootdir.logicalBlockNum, rootdir.partitionReferenceNum); goto error_out; } /* Allocate a dentry for the root inode */ sb->s_root = d_make_root(inode); if (!sb->s_root) { udf_err(sb, "Couldn't allocate root dentry\n"); goto error_out; } sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_max_links = UDF_MAX_LINKS; return 0; error_out: if (sbi->s_vat_inode) iput(sbi->s_vat_inode); if (sbi->s_partitions) for (i = 0; i < sbi->s_partitions; i++) udf_free_partition(&sbi->s_partmaps[i]); #ifdef CONFIG_UDF_NLS if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) unload_nls(sbi->s_nls_map); #endif if (!(sb->s_flags & MS_RDONLY)) udf_close_lvid(sb); brelse(sbi->s_lvid_bh); kfree(sbi->s_partmaps); kfree(sbi); sb->s_fs_info = NULL; return -EINVAL; } void _udf_err(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; /* mark sb error */ if (!(sb->s_flags & MS_RDONLY)) sb->s_dirt = 1; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf); va_end(args); } void _udf_warn(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf); va_end(args); } static void udf_put_super(struct super_block *sb) { int i; struct udf_sb_info *sbi; sbi = UDF_SB(sb); if (sbi->s_vat_inode) iput(sbi->s_vat_inode); if (sbi->s_partitions) for (i = 0; i < sbi->s_partitions; i++) udf_free_partition(&sbi->s_partmaps[i]); #ifdef CONFIG_UDF_NLS if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) unload_nls(sbi->s_nls_map); #endif if (!(sb->s_flags & MS_RDONLY)) udf_close_lvid(sb); brelse(sbi->s_lvid_bh); kfree(sbi->s_partmaps); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } static int udf_sync_fs(struct super_block *sb, int wait) { struct udf_sb_info *sbi = UDF_SB(sb); mutex_lock(&sbi->s_alloc_mutex); if (sbi->s_lvid_dirty) { /* * Blockdevice will be synced later so we don't have to submit * the buffer for IO */ mark_buffer_dirty(sbi->s_lvid_bh); sb->s_dirt = 0; sbi->s_lvid_dirty = 0; } mutex_unlock(&sbi->s_alloc_mutex); return 0; } static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDescImpUse *lvidiu; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); if (sbi->s_lvid_bh != NULL) lvidiu = udf_sb_lvidiu(sbi); else lvidiu = NULL; buf->f_type = UDF_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; buf->f_bfree = udf_count_free(sb); buf->f_bavail = buf->f_bfree; buf->f_files = (lvidiu != NULL ? 
(le32_to_cpu(lvidiu->numFiles) + le32_to_cpu(lvidiu->numDirs)) : 0) + buf->f_bfree; buf->f_ffree = buf->f_bfree; buf->f_namelen = UDF_NAME_LEN - 2; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap) { struct buffer_head *bh = NULL; unsigned int accum = 0; int index; int block = 0, newblock; struct kernel_lb_addr loc; uint32_t bytes; uint8_t *ptr; uint16_t ident; struct spaceBitmapDesc *bm; loc.logicalBlockNum = bitmap->s_extPosition; loc.partitionReferenceNum = UDF_SB(sb)->s_partition; bh = udf_read_ptagged(sb, &loc, 0, &ident); if (!bh) { udf_err(sb, "udf_count_free failed\n"); goto out; } else if (ident != TAG_IDENT_SBD) { brelse(bh); udf_err(sb, "udf_count_free failed\n"); goto out; } bm = (struct spaceBitmapDesc *)bh->b_data; bytes = le32_to_cpu(bm->numOfBytes); index = sizeof(struct spaceBitmapDesc); /* offset in first block only */ ptr = (uint8_t *)bh->b_data; while (bytes > 0) { u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index); accum += bitmap_weight((const unsigned long *)(ptr + index), cur_bytes * 8); bytes -= cur_bytes; if (bytes) { brelse(bh); newblock = udf_get_lb_pblock(sb, &loc, ++block); bh = udf_tread(sb, newblock); if (!bh) { udf_debug("read failed\n"); goto out; } index = 0; ptr = (uint8_t *)bh->b_data; } } brelse(bh); out: return accum; } static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table) { unsigned int accum = 0; uint32_t elen; struct kernel_lb_addr eloc; int8_t etype; struct extent_position epos; mutex_lock(&UDF_SB(sb)->s_alloc_mutex); epos.block = UDF_I(table)->i_location; epos.offset = sizeof(struct unallocSpaceEntry); epos.bh = NULL; while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) accum += (elen >> table->i_sb->s_blocksize_bits); brelse(epos.bh); mutex_unlock(&UDF_SB(sb)->s_alloc_mutex); return accum; } static unsigned int udf_count_free(struct super_block *sb) { unsigned int accum = 0; struct udf_sb_info *sbi; struct udf_part_map *map; sbi = UDF_SB(sb); if (sbi->s_lvid_bh) { struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *) sbi->s_lvid_bh->b_data; if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) { accum = le32_to_cpu( lvid->freeSpaceTable[sbi->s_partition]); if (accum == 0xFFFFFFFF) accum = 0; } } if (accum) return accum; map = &sbi->s_partmaps[sbi->s_partition]; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { accum += udf_count_free_bitmap(sb, map->s_uspace.s_bitmap); } if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) { accum += udf_count_free_bitmap(sb, map->s_fspace.s_bitmap); } if (accum) return accum; if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { accum += udf_count_free_table(sb, map->s_uspace.s_table); } if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) { accum += udf_count_free_table(sb, map->s_fspace.s_table); } return accum; }
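/*
 * Editor's note -- a minimal, self-contained sketch (not part of fs/udf)
 * of the fallback in udf_scan_anchors() above: optical drives often
 * misreport the last recorded block, so the scan enumerates a small set
 * of nearby candidates and probes each one (and, per the AVDP placement
 * rule, candidate - 256) before giving up. The function name and plain
 * integer types here are illustrative only.
 */
static unsigned int udf_sketch_anchor_candidates(unsigned long lastblock, unsigned long out[6])
{
	unsigned int n = 0;

	out[n++] = lastblock;			/* value reported by the drive */
	if (lastblock >= 1)
		out[n++] = lastblock - 1;	/* common off-by-one misreport */
	out[n++] = lastblock + 1;
	if (lastblock >= 2)
		out[n++] = lastblock - 2;
	if (lastblock >= 150)
		out[n++] = lastblock - 150;	/* run-out slack seen on CD media */
	if (lastblock >= 152)
		out[n++] = lastblock - 152;
	return n;
}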
./CrossVul/dataset_final_sorted/CWE-119/c/good_3686_0
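/*
 * Editor's note -- the core rule of udf_process_sequence() in the file
 * above, modelled standalone: most descriptor slots keep the block of the
 * descriptor with the highest volDescSeqNum seen so far (">=" so a
 * reissued descriptor wins), while the partition-descriptor slot keeps
 * the first block found because all PDs are rescanned later anyway. The
 * struct and function names below are illustrative, not kernel types.
 */
struct vds_slot_sketch {
	unsigned int seq;	/* highest volDescSeqNum seen so far */
	unsigned long block;	/* block that descriptor lives in */
};

static void vds_slot_keep_newest(struct vds_slot_sketch *slot, unsigned int seq, unsigned long block)
{
	if (seq >= slot->seq) {
		slot->seq = seq;
		slot->block = block;
	}
}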
crossvul-cpp_data_good_346_4
/* * PKCS15 emulation layer for EstEID card. * * Copyright (C) 2004, Martin Paljak <martin@martinpaljak.net> * Copyright (C) 2004, Bud P. Bruegger <bud@comune.grosseto.it> * Copyright (C) 2004, Antonino Iacono <ant_iacono@tin.it> * Copyright (C) 2003, Olaf Kirch <okir@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "common/compat_strlcpy.h" #include "common/compat_strlcat.h" #include "internal.h" #include "opensc.h" #include "pkcs15.h" #include "esteid.h" int sc_pkcs15emu_esteid_init_ex(sc_pkcs15_card_t *, struct sc_aid *, sc_pkcs15emu_opt_t *); static void set_string (char **strp, const char *value) { if (*strp) free (*strp); *strp = value ? strdup (value) : NULL; } int select_esteid_df (sc_card_t * card) { int r; sc_path_t tmppath; sc_format_path ("3F00EEEE", &tmppath); r = sc_select_file (card, &tmppath, NULL); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "esteid select DF failed"); return r; } static int sc_pkcs15emu_esteid_init (sc_pkcs15_card_t * p15card) { sc_card_t *card = p15card->card; unsigned char buff[128]; int r, i; size_t field_length = 0, modulus_length = 0; sc_path_t tmppath; set_string (&p15card->tokeninfo->label, "ID-kaart"); set_string (&p15card->tokeninfo->manufacturer_id, "AS Sertifitseerimiskeskus"); /* Select application directory */ sc_format_path ("3f00eeee5044", &tmppath); r = sc_select_file (card, &tmppath, NULL); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "select esteid PD failed"); /* read the serial (document number) */ r = sc_read_record (card, SC_ESTEID_PD_DOCUMENT_NR, buff, sizeof(buff), SC_RECORD_BY_REC_NR); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "read document number failed"); buff[MIN((size_t) r, (sizeof buff)-1)] = '\0'; set_string (&p15card->tokeninfo->serial_number, (const char *) buff); p15card->tokeninfo->flags = SC_PKCS15_TOKEN_PRN_GENERATION | SC_PKCS15_TOKEN_EID_COMPLIANT | SC_PKCS15_TOKEN_READONLY; /* add certificates */ for (i = 0; i < 2; i++) { static const char *esteid_cert_names[2] = { "Isikutuvastus", "Allkirjastamine"}; static char const *esteid_cert_paths[2] = { "3f00eeeeaace", "3f00eeeeddce"}; static int esteid_cert_ids[2] = {1, 2}; struct sc_pkcs15_cert_info cert_info; struct sc_pkcs15_object cert_obj; memset(&cert_info, 0, sizeof(cert_info)); memset(&cert_obj, 0, sizeof(cert_obj)); cert_info.id.value[0] = esteid_cert_ids[i]; cert_info.id.len = 1; sc_format_path(esteid_cert_paths[i], &cert_info.path); strlcpy(cert_obj.label, esteid_cert_names[i], sizeof(cert_obj.label)); r = sc_pkcs15emu_add_x509_cert(p15card, &cert_obj, &cert_info); if (r < 0) return SC_ERROR_INTERNAL; if (i == 0) { sc_pkcs15_cert_t *cert = NULL; r = sc_pkcs15_read_certificate(p15card, &cert_info, &cert); if (r < 0) return SC_ERROR_INTERNAL; if (cert->key->algorithm == 
SC_ALGORITHM_EC) field_length = cert->key->u.ec.params.field_length; else modulus_length = cert->key->u.rsa.modulus.len * 8; if (r == SC_SUCCESS) { static const struct sc_object_id cn_oid = {{ 2, 5, 4, 3, -1 }}; u8 *cn_name = NULL; size_t cn_len = 0; sc_pkcs15_get_name_from_dn(card->ctx, cert->subject, cert->subject_len, &cn_oid, &cn_name, &cn_len); if (cn_len > 0) { char *token_name = malloc(cn_len+1); if (token_name) { memcpy(token_name, cn_name, cn_len); token_name[cn_len] = '\0'; set_string(&p15card->tokeninfo->label, (const char*)token_name); free(token_name); } } free(cn_name); sc_pkcs15_free_certificate(cert); } } } /* the file with key pin info (tries left) */ sc_format_path ("3f000016", &tmppath); r = sc_select_file (card, &tmppath, NULL); if (r < 0) return SC_ERROR_INTERNAL; /* add pins */ for (i = 0; i < 3; i++) { unsigned char tries_left; static const char *esteid_pin_names[3] = { "PIN1", "PIN2", "PUK" }; static const int esteid_pin_min[3] = {4, 5, 8}; static const int esteid_pin_ref[3] = {1, 2, 0}; static const int esteid_pin_authid[3] = {1, 2, 3}; static const int esteid_pin_flags[3] = {0, 0, SC_PKCS15_PIN_FLAG_UNBLOCKING_PIN}; struct sc_pkcs15_auth_info pin_info; struct sc_pkcs15_object pin_obj; memset(&pin_info, 0, sizeof(pin_info)); memset(&pin_obj, 0, sizeof(pin_obj)); /* read the number of tries left for the PIN */ r = sc_read_record (card, i + 1, buff, sizeof(buff), SC_RECORD_BY_REC_NR); if (r < 6) /* record must cover the tries-left byte at offset 5 */ return SC_ERROR_INTERNAL; tries_left = buff[5]; pin_info.auth_id.len = 1; pin_info.auth_id.value[0] = esteid_pin_authid[i]; pin_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; pin_info.attrs.pin.reference = esteid_pin_ref[i]; pin_info.attrs.pin.flags = esteid_pin_flags[i]; pin_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; pin_info.attrs.pin.min_length = esteid_pin_min[i]; pin_info.attrs.pin.stored_length = 12; pin_info.attrs.pin.max_length = 12; pin_info.attrs.pin.pad_char = '\0'; pin_info.tries_left = (int)tries_left; pin_info.max_tries = 3; strlcpy(pin_obj.label, esteid_pin_names[i], sizeof(pin_obj.label)); pin_obj.flags = esteid_pin_flags[i]; /* Link normal PINs with PUK */ if (i < 2) { pin_obj.auth_id.len = 1; pin_obj.auth_id.value[0] = 3; } r = sc_pkcs15emu_add_pin_obj(p15card, &pin_obj, &pin_info); if (r < 0) return SC_ERROR_INTERNAL; } /* add private keys */ for (i = 0; i < 2; i++) { static int prkey_pin[2] = {1, 2}; static const char *prkey_name[2] = { "Isikutuvastus", "Allkirjastamine"}; struct sc_pkcs15_prkey_info prkey_info; struct sc_pkcs15_object prkey_obj; memset(&prkey_info, 0, sizeof(prkey_info)); memset(&prkey_obj, 0, sizeof(prkey_obj)); prkey_info.id.len = 1; prkey_info.id.value[0] = prkey_pin[i]; prkey_info.native = 1; prkey_info.key_reference = i + 1; prkey_info.field_length = field_length; prkey_info.modulus_length = modulus_length; if (i == 1) prkey_info.usage = SC_PKCS15_PRKEY_USAGE_NONREPUDIATION; else if(field_length > 0) // ECC has sign and derive usage prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_DERIVE; else prkey_info.usage = SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_DECRYPT; strlcpy(prkey_obj.label, prkey_name[i], sizeof(prkey_obj.label)); prkey_obj.auth_id.len = 1; prkey_obj.auth_id.value[0] = prkey_pin[i]; prkey_obj.user_consent = 0; prkey_obj.flags = SC_PKCS15_CO_FLAG_PRIVATE; if(field_length > 0) r = sc_pkcs15emu_add_ec_prkey(p15card, &prkey_obj, &prkey_info); else r = sc_pkcs15emu_add_rsa_prkey(p15card, &prkey_obj, &prkey_info); if (r < 0) return SC_ERROR_INTERNAL; }
return SC_SUCCESS; } static int esteid_detect_card(sc_pkcs15_card_t *p15card) { if (is_esteid_card(p15card->card)) return SC_SUCCESS; else return SC_ERROR_WRONG_CARD; } int sc_pkcs15emu_esteid_init_ex(sc_pkcs15_card_t *p15card, struct sc_aid *aid, sc_pkcs15emu_opt_t *opts) { if (opts && opts->flags & SC_PKCS15EMU_FLAGS_NO_CHECK) return sc_pkcs15emu_esteid_init(p15card); else { int r = esteid_detect_card(p15card); if (r) return SC_ERROR_WRONG_CARD; return sc_pkcs15emu_esteid_init(p15card); } }
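/*
 * Editor's note -- the record reads above follow two defensive patterns
 * worth isolating: a variable-length field is NUL-terminated inside the
 * buffer no matter what length the card reports, and a fixed-offset field
 * is read only once the returned length is known to cover it. A minimal
 * sketch under those assumptions; the helper names are illustrative, not
 * OpenSC API.
 */
static int sketch_tries_left(const unsigned char *rec, int r)
{
	if (r < 6)	/* record must reach the tries-left byte at offset 5 */
		return -1;
	return rec[5];
}

static void sketch_terminate(unsigned char *buf, size_t bufsize, int r)
{
	/* assumes r >= 0 (the caller has already rejected read errors) */
	size_t end = ((size_t)r < bufsize - 1) ? (size_t)r : bufsize - 1;
	buf[end] = '\0';	/* never writes past the end of buf */
}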
./CrossVul/dataset_final_sorted/CWE-119/c/good_346_4
crossvul-cpp_data_good_3908_0
/** * FreeRDP: A Remote Desktop Protocol Implementation * Update Data PDUs * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <winpr/synch.h> #include <winpr/thread.h> #include <winpr/collections.h> #include "update.h" #include "surface.h" #include "message.h" #include "info.h" #include "window.h" #include <freerdp/log.h> #include <freerdp/peer.h> #include <freerdp/codec/bitmap.h> #include "../cache/pointer.h" #include "../cache/palette.h" #include "../cache/bitmap.h" #define TAG FREERDP_TAG("core.update") static const char* const UPDATE_TYPE_STRINGS[] = { "Orders", "Bitmap", "Palette", "Synchronize" }; static const char* update_type_to_string(UINT16 updateType) { if (updateType >= ARRAYSIZE(UPDATE_TYPE_STRINGS)) return "UNKNOWN"; return UPDATE_TYPE_STRINGS[updateType]; } static BOOL update_recv_orders(rdpUpdate* update, wStream* s) { UINT16 numberOrders; if (Stream_GetRemainingLength(s) < 6) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 6"); return FALSE; } Stream_Seek_UINT16(s); /* pad2OctetsA (2 bytes) */ Stream_Read_UINT16(s, numberOrders); /* numberOrders (2 bytes) */ Stream_Seek_UINT16(s); /* pad2OctetsB (2 bytes) */ while (numberOrders > 0) { if (!update_recv_order(update, s)) { WLog_ERR(TAG, "update_recv_order() failed"); return FALSE; } numberOrders--; } return TRUE; } static BOOL update_read_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { WINPR_UNUSED(update); if (Stream_GetRemainingLength(s) < 18) return FALSE; Stream_Read_UINT16(s, bitmapData->destLeft); Stream_Read_UINT16(s, bitmapData->destTop); Stream_Read_UINT16(s, bitmapData->destRight); Stream_Read_UINT16(s, bitmapData->destBottom); Stream_Read_UINT16(s, bitmapData->width); Stream_Read_UINT16(s, bitmapData->height); Stream_Read_UINT16(s, bitmapData->bitsPerPixel); Stream_Read_UINT16(s, bitmapData->flags); Stream_Read_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Read_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ bitmapData->bitmapLength = bitmapData->cbCompMainBodySize; } bitmapData->compressed = TRUE; } else bitmapData->compressed = FALSE; if (Stream_GetRemainingLength(s) < bitmapData->bitmapLength) return FALSE; if (bitmapData->bitmapLength > 0) { bitmapData->bitmapDataStream = malloc(bitmapData->bitmapLength); if (!bitmapData->bitmapDataStream) return FALSE; memcpy(bitmapData->bitmapDataStream, Stream_Pointer(s), bitmapData->bitmapLength); Stream_Seek(s, 
bitmapData->bitmapLength); } return TRUE; } static BOOL update_write_bitmap_data(rdpUpdate* update, wStream* s, BITMAP_DATA* bitmapData) { if (!Stream_EnsureRemainingCapacity(s, 64 + bitmapData->bitmapLength)) return FALSE; if (update->autoCalculateBitmapData) { bitmapData->flags = 0; bitmapData->cbCompFirstRowSize = 0; if (bitmapData->compressed) bitmapData->flags |= BITMAP_COMPRESSION; if (update->context->settings->NoBitmapCompressionHeader) { bitmapData->flags |= NO_BITMAP_COMPRESSION_HDR; bitmapData->cbCompMainBodySize = bitmapData->bitmapLength; } } Stream_Write_UINT16(s, bitmapData->destLeft); Stream_Write_UINT16(s, bitmapData->destTop); Stream_Write_UINT16(s, bitmapData->destRight); Stream_Write_UINT16(s, bitmapData->destBottom); Stream_Write_UINT16(s, bitmapData->width); Stream_Write_UINT16(s, bitmapData->height); Stream_Write_UINT16(s, bitmapData->bitsPerPixel); Stream_Write_UINT16(s, bitmapData->flags); Stream_Write_UINT16(s, bitmapData->bitmapLength); if (bitmapData->flags & BITMAP_COMPRESSION) { if (!(bitmapData->flags & NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16(s, bitmapData->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16(s, bitmapData->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ } Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } else { Stream_Write(s, bitmapData->bitmapDataStream, bitmapData->bitmapLength); } return TRUE; } BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s) { UINT32 i; BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE)); if (!bitmapUpdate) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %" PRIu32 "", bitmapUpdate->number); if (bitmapUpdate->number > bitmapUpdate->count) { UINT32 count = bitmapUpdate->number * 2; BITMAP_DATA* newdata = (BITMAP_DATA*)realloc(bitmapUpdate->rectangles, sizeof(BITMAP_DATA) * count); if (!newdata) goto fail; bitmapUpdate->rectangles = newdata; ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count], sizeof(BITMAP_DATA) * (count - bitmapUpdate->count)); bitmapUpdate->count = count; } /* rectangles */ for (i = 0; i < bitmapUpdate->number; i++) { if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) goto fail; } return bitmapUpdate; fail: free_bitmap_update(update->context, bitmapUpdate); return NULL; } static BOOL update_write_bitmap_update(rdpUpdate* update, wStream* s, const BITMAP_UPDATE* bitmapUpdate) { int i; if (!Stream_EnsureRemainingCapacity(s, 32)) return FALSE; Stream_Write_UINT16(s, UPDATE_TYPE_BITMAP); /* updateType */ Stream_Write_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */ /* rectangles */ for (i = 0; i < (int)bitmapUpdate->number; i++) { if (!update_write_bitmap_data(update, s, &bitmapUpdate->rectangles[i])) return FALSE; } return TRUE; } PALETTE_UPDATE* update_read_palette(rdpUpdate* update, wStream* s) { int i; PALETTE_ENTRY* entry; PALETTE_UPDATE* palette_update = calloc(1, sizeof(PALETTE_UPDATE)); if (!palette_update) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ Stream_Read_UINT32(s, palette_update->number); /* numberColors (4 bytes), must be set to 256 */ if (palette_update->number > 256) 
palette_update->number = 256; if (Stream_GetRemainingLength(s) < palette_update->number * 3) goto fail; /* paletteEntries */ for (i = 0; i < (int)palette_update->number; i++) { entry = &palette_update->entries[i]; Stream_Read_UINT8(s, entry->red); Stream_Read_UINT8(s, entry->green); Stream_Read_UINT8(s, entry->blue); } return palette_update; fail: free_palette_update(update->context, palette_update); return NULL; } static BOOL update_read_synchronize(rdpUpdate* update, wStream* s) { WINPR_UNUSED(update); return Stream_SafeSeek(s, 2); /* pad2Octets (2 bytes) */ /** * The Synchronize Update is an artifact from the * T.128 protocol and should be ignored. */ } static BOOL update_read_play_sound(wStream* s, PLAY_SOUND_UPDATE* play_sound) { if (Stream_GetRemainingLength(s) < 8) return FALSE; Stream_Read_UINT32(s, play_sound->duration); /* duration (4 bytes) */ Stream_Read_UINT32(s, play_sound->frequency); /* frequency (4 bytes) */ return TRUE; } BOOL update_recv_play_sound(rdpUpdate* update, wStream* s) { PLAY_SOUND_UPDATE play_sound; if (!update_read_play_sound(s, &play_sound)) return FALSE; return IFCALLRESULT(FALSE, update->PlaySound, update->context, &play_sound); } POINTER_POSITION_UPDATE* update_read_pointer_position(rdpUpdate* update, wStream* s) { POINTER_POSITION_UPDATE* pointer_position = calloc(1, sizeof(POINTER_POSITION_UPDATE)); if (!pointer_position) goto fail; if (Stream_GetRemainingLength(s) < 4) goto fail; Stream_Read_UINT16(s, pointer_position->xPos); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer_position->yPos); /* yPos (2 bytes) */ return pointer_position; fail: free_pointer_position_update(update->context, pointer_position); return NULL; } POINTER_SYSTEM_UPDATE* update_read_pointer_system(rdpUpdate* update, wStream* s) { POINTER_SYSTEM_UPDATE* pointer_system = calloc(1, sizeof(POINTER_SYSTEM_UPDATE)); if (!pointer_system) goto fail; if (Stream_GetRemainingLength(s) < 4) goto fail; Stream_Read_UINT32(s, pointer_system->type); /* systemPointerType (4 bytes) */ return pointer_system; fail: free_pointer_system_update(update->context, pointer_system); return NULL; } static BOOL _update_read_pointer_color(wStream* s, POINTER_COLOR_UPDATE* pointer_color, BYTE xorBpp) { BYTE* newMask; UINT32 scanlineSize; if (!pointer_color) goto fail; if (Stream_GetRemainingLength(s) < 14) goto fail; Stream_Read_UINT16(s, pointer_color->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, pointer_color->xPos); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer_color->yPos); /* yPos (2 bytes) */ /** * As stated in 2.2.9.1.1.4.4 Color Pointer Update: * The maximum allowed pointer width/height is 96 pixels if the client indicated support * for large pointers by setting the LARGE_POINTER_FLAG (0x00000001) in the Large * Pointer Capability Set (section 2.2.7.2.7). If the LARGE_POINTER_FLAG was not * set, the maximum allowed pointer width/height is 32 pixels. * * So we check for a maximum of 96 for CVE-2014-0250. 
*/ Stream_Read_UINT16(s, pointer_color->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer_color->height); /* height (2 bytes) */ if ((pointer_color->width > 96) || (pointer_color->height > 96)) goto fail; Stream_Read_UINT16(s, pointer_color->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer_color->lengthXorMask); /* lengthXorMask (2 bytes) */ /** * There does not seem to be any documentation on why * xPos / yPos can be larger than width / height * so it is missing in documentation or a bug in implementation * 2.2.9.1.1.4.4 Color Pointer Update (TS_COLORPOINTERATTRIBUTE) */ if (pointer_color->xPos >= pointer_color->width) pointer_color->xPos = 0; if (pointer_color->yPos >= pointer_color->height) pointer_color->yPos = 0; if (pointer_color->lengthXorMask > 0) { /** * Spec states that: * * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to * the next even number of bytes). * * In fact instead of 24-bpp, the bpp parameter is given by the containing packet. */ if (Stream_GetRemainingLength(s) < pointer_color->lengthXorMask) goto fail; scanlineSize = (7 + xorBpp * pointer_color->width) / 8; scanlineSize = ((scanlineSize + 1) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthXorMask) { WLog_ERR(TAG, "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32 " instead of %" PRIu32 "", pointer_color->width, pointer_color->height, pointer_color->lengthXorMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->xorMaskData, pointer_color->lengthXorMask); if (!newMask) goto fail; pointer_color->xorMaskData = newMask; Stream_Read(s, pointer_color->xorMaskData, pointer_color->lengthXorMask); } if (pointer_color->lengthAndMask > 0) { /** * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 7x7 pixel cursor is being sent, then each scan-line will * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even * number of bytes). 
*/ if (Stream_GetRemainingLength(s) < pointer_color->lengthAndMask) goto fail; scanlineSize = ((7 + pointer_color->width) / 8); scanlineSize = ((1 + scanlineSize) / 2) * 2; if (scanlineSize * pointer_color->height != pointer_color->lengthAndMask) { WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "", pointer_color->lengthAndMask, scanlineSize * pointer_color->height); goto fail; } newMask = realloc(pointer_color->andMaskData, pointer_color->lengthAndMask); if (!newMask) goto fail; pointer_color->andMaskData = newMask; Stream_Read(s, pointer_color->andMaskData, pointer_color->lengthAndMask); } if (Stream_GetRemainingLength(s) > 0) Stream_Seek_UINT8(s); /* pad (1 byte) */ return TRUE; fail: return FALSE; } POINTER_COLOR_UPDATE* update_read_pointer_color(rdpUpdate* update, wStream* s, BYTE xorBpp) { POINTER_COLOR_UPDATE* pointer_color = calloc(1, sizeof(POINTER_COLOR_UPDATE)); if (!pointer_color) goto fail; if (!_update_read_pointer_color(s, pointer_color, xorBpp)) goto fail; return pointer_color; fail: free_pointer_color_update(update->context, pointer_color); return NULL; } static BOOL _update_read_pointer_large(wStream* s, POINTER_LARGE_UPDATE* pointer) { BYTE* newMask; UINT32 scanlineSize; if (!pointer) goto fail; if (Stream_GetRemainingLength(s) < 14) goto fail; Stream_Read_UINT16(s, pointer->xorBpp); Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotX); /* xPos (2 bytes) */ Stream_Read_UINT16(s, pointer->hotSpotY); /* yPos (2 bytes) */ Stream_Read_UINT16(s, pointer->width); /* width (2 bytes) */ Stream_Read_UINT16(s, pointer->height); /* height (2 bytes) */ if ((pointer->width > 384) || (pointer->height > 384)) goto fail; Stream_Read_UINT16(s, pointer->lengthAndMask); /* lengthAndMask (2 bytes) */ Stream_Read_UINT16(s, pointer->lengthXorMask); /* lengthXorMask (2 bytes) */ if (pointer->hotSpotX >= pointer->width) pointer->hotSpotX = 0; if (pointer->hotSpotY >= pointer->height) pointer->hotSpotY = 0; if (pointer->lengthXorMask > 0) { /** * Spec states that: * * xorMaskData (variable): A variable-length array of bytes. Contains the 24-bpp, bottom-up * XOR mask scan-line data. The XOR mask is padded to a 2-byte boundary for each encoded * scan-line. For example, if a 3x3 pixel cursor is being sent, then each scan-line will * consume 10 bytes (3 pixels per scan-line multiplied by 3 bytes per pixel, rounded up to * the next even number of bytes). * * In fact instead of 24-bpp, the bpp parameter is given by the containing packet. */ if (Stream_GetRemainingLength(s) < pointer->lengthXorMask) goto fail; scanlineSize = (7 + pointer->xorBpp * pointer->width) / 8; scanlineSize = ((scanlineSize + 1) / 2) * 2; if (scanlineSize * pointer->height != pointer->lengthXorMask) { WLog_ERR(TAG, "invalid lengthXorMask: width=%" PRIu32 " height=%" PRIu32 ", %" PRIu32 " instead of %" PRIu32 "", pointer->width, pointer->height, pointer->lengthXorMask, scanlineSize * pointer->height); goto fail; } newMask = realloc(pointer->xorMaskData, pointer->lengthXorMask); if (!newMask) goto fail; pointer->xorMaskData = newMask; Stream_Read(s, pointer->xorMaskData, pointer->lengthXorMask); } if (pointer->lengthAndMask > 0) { /** * andMaskData (variable): A variable-length array of bytes. Contains the 1-bpp, bottom-up * AND mask scan-line data. The AND mask is padded to a 2-byte boundary for each encoded * scan-line. 
For example, if a 7x7 pixel cursor is being sent, then each scan-line will * consume 2 bytes (7 pixels per scan-line multiplied by 1 bpp, rounded up to the next even * number of bytes). */ if (Stream_GetRemainingLength(s) < pointer->lengthAndMask) goto fail; scanlineSize = ((7 + pointer->width) / 8); scanlineSize = ((1 + scanlineSize) / 2) * 2; if (scanlineSize * pointer->height != pointer->lengthAndMask) { WLog_ERR(TAG, "invalid lengthAndMask: %" PRIu32 " instead of %" PRIu32 "", pointer->lengthAndMask, scanlineSize * pointer->height); goto fail; } newMask = realloc(pointer->andMaskData, pointer->lengthAndMask); if (!newMask) goto fail; pointer->andMaskData = newMask; Stream_Read(s, pointer->andMaskData, pointer->lengthAndMask); } if (Stream_GetRemainingLength(s) > 0) Stream_Seek_UINT8(s); /* pad (1 byte) */ return TRUE; fail: return FALSE; } POINTER_LARGE_UPDATE* update_read_pointer_large(rdpUpdate* update, wStream* s) { POINTER_LARGE_UPDATE* pointer = calloc(1, sizeof(POINTER_LARGE_UPDATE)); if (!pointer) goto fail; if (!_update_read_pointer_large(s, pointer)) goto fail; return pointer; fail: free_pointer_large_update(update->context, pointer); return NULL; } POINTER_NEW_UPDATE* update_read_pointer_new(rdpUpdate* update, wStream* s) { POINTER_NEW_UPDATE* pointer_new = calloc(1, sizeof(POINTER_NEW_UPDATE)); if (!pointer_new) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */ if ((pointer_new->xorBpp < 1) || (pointer_new->xorBpp > 32)) { WLog_ERR(TAG, "invalid xorBpp %" PRIu32 "", pointer_new->xorBpp); goto fail; } if (!_update_read_pointer_color(s, &pointer_new->colorPtrAttr, pointer_new->xorBpp)) /* colorPtrAttr */ goto fail; return pointer_new; fail: free_pointer_new_update(update->context, pointer_new); return NULL; } POINTER_CACHED_UPDATE* update_read_pointer_cached(rdpUpdate* update, wStream* s) { POINTER_CACHED_UPDATE* pointer = calloc(1, sizeof(POINTER_CACHED_UPDATE)); if (!pointer) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT16(s, pointer->cacheIndex); /* cacheIndex (2 bytes) */ return pointer; fail: free_pointer_cached_update(update->context, pointer); return NULL; } BOOL update_recv_pointer(rdpUpdate* update, wStream* s) { BOOL rc = FALSE; UINT16 messageType; rdpContext* context = update->context; rdpPointerUpdate* pointer = update->pointer; if (Stream_GetRemainingLength(s) < 2 + 2) return FALSE; Stream_Read_UINT16(s, messageType); /* messageType (2 bytes) */ Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */ switch (messageType) { case PTR_MSG_TYPE_POSITION: { POINTER_POSITION_UPDATE* pointer_position = update_read_pointer_position(update, s); if (pointer_position) { rc = IFCALLRESULT(FALSE, pointer->PointerPosition, context, pointer_position); free_pointer_position_update(context, pointer_position); } } break; case PTR_MSG_TYPE_SYSTEM: { POINTER_SYSTEM_UPDATE* pointer_system = update_read_pointer_system(update, s); if (pointer_system) { rc = IFCALLRESULT(FALSE, pointer->PointerSystem, context, pointer_system); free_pointer_system_update(context, pointer_system); } } break; case PTR_MSG_TYPE_COLOR: { POINTER_COLOR_UPDATE* pointer_color = update_read_pointer_color(update, s, 24); if (pointer_color) { rc = IFCALLRESULT(FALSE, pointer->PointerColor, context, pointer_color); free_pointer_color_update(context, pointer_color); } } break; case PTR_MSG_TYPE_POINTER_LARGE: { POINTER_LARGE_UPDATE* pointer_large = update_read_pointer_large(update, s); if (pointer_large) { rc = 
IFCALLRESULT(FALSE, pointer->PointerLarge, context, pointer_large); free_pointer_large_update(context, pointer_large); } } break; case PTR_MSG_TYPE_POINTER: { POINTER_NEW_UPDATE* pointer_new = update_read_pointer_new(update, s); if (pointer_new) { rc = IFCALLRESULT(FALSE, pointer->PointerNew, context, pointer_new); free_pointer_new_update(context, pointer_new); } } break; case PTR_MSG_TYPE_CACHED: { POINTER_CACHED_UPDATE* pointer_cached = update_read_pointer_cached(update, s); if (pointer_cached) { rc = IFCALLRESULT(FALSE, pointer->PointerCached, context, pointer_cached); free_pointer_cached_update(context, pointer_cached); } } break; default: break; } return rc; } BOOL update_recv(rdpUpdate* update, wStream* s) { BOOL rc = FALSE; UINT16 updateType; rdpContext* context = update->context; if (Stream_GetRemainingLength(s) < 2) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 2"); return FALSE; } Stream_Read_UINT16(s, updateType); /* updateType (2 bytes) */ WLog_Print(update->log, WLOG_TRACE, "%s Update Data PDU", update_type_to_string(updateType)); /* bounded lookup: updateType comes off the wire */ if (!update_begin_paint(update)) goto fail; switch (updateType) { case UPDATE_TYPE_ORDERS: rc = update_recv_orders(update, s); break; case UPDATE_TYPE_BITMAP: { BITMAP_UPDATE* bitmap_update = update_read_bitmap_update(update, s); if (!bitmap_update) { WLog_ERR(TAG, "UPDATE_TYPE_BITMAP - update_read_bitmap_update() failed"); goto fail; } rc = IFCALLRESULT(FALSE, update->BitmapUpdate, context, bitmap_update); free_bitmap_update(update->context, bitmap_update); } break; case UPDATE_TYPE_PALETTE: { PALETTE_UPDATE* palette_update = update_read_palette(update, s); if (!palette_update) { WLog_ERR(TAG, "UPDATE_TYPE_PALETTE - update_read_palette() failed"); goto fail; } rc = IFCALLRESULT(FALSE, update->Palette, context, palette_update); free_palette_update(context, palette_update); } break; case UPDATE_TYPE_SYNCHRONIZE: if (!update_read_synchronize(update, s)) goto fail; rc = IFCALLRESULT(TRUE, update->Synchronize, context); break; default: break; } fail: if (!update_end_paint(update)) rc = FALSE; if (!rc) { WLog_ERR(TAG, "UPDATE_TYPE %s [%" PRIu16 "] failed", update_type_to_string(updateType), updateType); return FALSE; } return TRUE; } void update_reset_state(rdpUpdate* update) { rdpPrimaryUpdate* primary = update->primary; rdpAltSecUpdate* altsec = update->altsec; if (primary->fast_glyph.glyphData.aj) { free(primary->fast_glyph.glyphData.aj); primary->fast_glyph.glyphData.aj = NULL; } ZeroMemory(&primary->order_info, sizeof(ORDER_INFO)); ZeroMemory(&primary->dstblt, sizeof(DSTBLT_ORDER)); ZeroMemory(&primary->patblt, sizeof(PATBLT_ORDER)); ZeroMemory(&primary->scrblt, sizeof(SCRBLT_ORDER)); ZeroMemory(&primary->opaque_rect, sizeof(OPAQUE_RECT_ORDER)); ZeroMemory(&primary->draw_nine_grid, sizeof(DRAW_NINE_GRID_ORDER)); ZeroMemory(&primary->multi_dstblt, sizeof(MULTI_DSTBLT_ORDER)); ZeroMemory(&primary->multi_patblt, sizeof(MULTI_PATBLT_ORDER)); ZeroMemory(&primary->multi_scrblt, sizeof(MULTI_SCRBLT_ORDER)); ZeroMemory(&primary->multi_opaque_rect, sizeof(MULTI_OPAQUE_RECT_ORDER)); ZeroMemory(&primary->multi_draw_nine_grid, sizeof(MULTI_DRAW_NINE_GRID_ORDER)); ZeroMemory(&primary->line_to, sizeof(LINE_TO_ORDER)); ZeroMemory(&primary->polyline, sizeof(POLYLINE_ORDER)); ZeroMemory(&primary->memblt, sizeof(MEMBLT_ORDER)); ZeroMemory(&primary->mem3blt, sizeof(MEM3BLT_ORDER)); ZeroMemory(&primary->save_bitmap, sizeof(SAVE_BITMAP_ORDER)); ZeroMemory(&primary->glyph_index, sizeof(GLYPH_INDEX_ORDER)); ZeroMemory(&primary->fast_index, sizeof(FAST_INDEX_ORDER));
ZeroMemory(&primary->fast_glyph, sizeof(FAST_GLYPH_ORDER)); ZeroMemory(&primary->polygon_sc, sizeof(POLYGON_SC_ORDER)); ZeroMemory(&primary->polygon_cb, sizeof(POLYGON_CB_ORDER)); ZeroMemory(&primary->ellipse_sc, sizeof(ELLIPSE_SC_ORDER)); ZeroMemory(&primary->ellipse_cb, sizeof(ELLIPSE_CB_ORDER)); primary->order_info.orderType = ORDER_TYPE_PATBLT; if (!update->initialState) { altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE; IFCALL(altsec->SwitchSurface, update->context, &(altsec->switch_surface)); } } BOOL update_post_connect(rdpUpdate* update) { update->asynchronous = update->context->settings->AsyncUpdate; if (update->asynchronous) if (!(update->proxy = update_message_proxy_new(update))) return FALSE; update->altsec->switch_surface.bitmapId = SCREEN_BITMAP_SURFACE; IFCALL(update->altsec->SwitchSurface, update->context, &(update->altsec->switch_surface)); update->initialState = FALSE; return TRUE; } void update_post_disconnect(rdpUpdate* update) { update->asynchronous = update->context->settings->AsyncUpdate; if (update->asynchronous) update_message_proxy_free(update->proxy); update->initialState = TRUE; } static BOOL _update_begin_paint(rdpContext* context) { wStream* s; rdpUpdate* update = context->update; if (update->us) { if (!update_end_paint(update)) return FALSE; } s = fastpath_update_pdu_init_new(context->rdp->fastpath); if (!s) return FALSE; Stream_SealLength(s); Stream_Seek(s, 2); /* numberOrders (2 bytes) */ update->combineUpdates = TRUE; update->numberOrders = 0; update->us = s; return TRUE; } static BOOL _update_end_paint(rdpContext* context) { wStream* s; int headerLength; rdpUpdate* update = context->update; if (!update->us) return FALSE; s = update->us; headerLength = Stream_Length(s); Stream_SealLength(s); Stream_SetPosition(s, headerLength); Stream_Write_UINT16(s, update->numberOrders); /* numberOrders (2 bytes) */ Stream_SetPosition(s, Stream_Length(s)); if (update->numberOrders > 0) { WLog_DBG(TAG, "sending %" PRIu16 " orders", update->numberOrders); fastpath_send_update_pdu(context->rdp->fastpath, FASTPATH_UPDATETYPE_ORDERS, s, FALSE); } update->combineUpdates = FALSE; update->numberOrders = 0; update->us = NULL; Stream_Free(s, TRUE); return TRUE; } static void update_flush(rdpContext* context) { rdpUpdate* update = context->update; if (update->numberOrders > 0) { update_end_paint(update); update_begin_paint(update); } } static void update_force_flush(rdpContext* context) { update_flush(context); } static BOOL update_check_flush(rdpContext* context, int size) { wStream* s; rdpUpdate* update = context->update; s = update->us; if (!update->us) { update_begin_paint(update); return FALSE; } if (Stream_GetPosition(s) + size + 64 >= 0x3FFF) { update_flush(context); return TRUE; } return FALSE; } static BOOL update_set_bounds(rdpContext* context, const rdpBounds* bounds) { rdpUpdate* update = context->update; CopyMemory(&update->previousBounds, &update->currentBounds, sizeof(rdpBounds)); if (!bounds) ZeroMemory(&update->currentBounds, sizeof(rdpBounds)); else CopyMemory(&update->currentBounds, bounds, sizeof(rdpBounds)); return TRUE; } static BOOL update_bounds_is_null(rdpBounds* bounds) { if ((bounds->left == 0) && (bounds->top == 0) && (bounds->right == 0) && (bounds->bottom == 0)) return TRUE; return FALSE; } static BOOL update_bounds_equals(rdpBounds* bounds1, rdpBounds* bounds2) { if ((bounds1->left == bounds2->left) && (bounds1->top == bounds2->top) && (bounds1->right == bounds2->right) && (bounds1->bottom == bounds2->bottom)) return TRUE; return FALSE; } 
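/*
 * Editor's note: the helpers below implement the bounds compression used
 * by primary drawing orders in this file -- when the clipping rectangle
 * is unchanged the order carries ORDER_ZERO_BOUNDS_DELTAS and no bounds
 * payload at all; otherwise one boundsFlags byte is written followed by
 * 2 bytes for each edge that differs from the previous order.
 */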
static int update_prepare_bounds(rdpContext* context, ORDER_INFO* orderInfo) { int length = 0; rdpUpdate* update = context->update; orderInfo->boundsFlags = 0; if (update_bounds_is_null(&update->currentBounds)) return 0; orderInfo->controlFlags |= ORDER_BOUNDS; if (update_bounds_equals(&update->previousBounds, &update->currentBounds)) { orderInfo->controlFlags |= ORDER_ZERO_BOUNDS_DELTAS; return 0; } else { length += 1; if (update->previousBounds.left != update->currentBounds.left) { orderInfo->bounds.left = update->currentBounds.left; orderInfo->boundsFlags |= BOUND_LEFT; length += 2; } if (update->previousBounds.top != update->currentBounds.top) { orderInfo->bounds.top = update->currentBounds.top; orderInfo->boundsFlags |= BOUND_TOP; length += 2; } if (update->previousBounds.right != update->currentBounds.right) { orderInfo->bounds.right = update->currentBounds.right; orderInfo->boundsFlags |= BOUND_RIGHT; length += 2; } if (update->previousBounds.bottom != update->currentBounds.bottom) { orderInfo->bounds.bottom = update->currentBounds.bottom; orderInfo->boundsFlags |= BOUND_BOTTOM; length += 2; } } return length; } static int update_prepare_order_info(rdpContext* context, ORDER_INFO* orderInfo, UINT32 orderType) { int length = 1; orderInfo->fieldFlags = 0; orderInfo->orderType = orderType; orderInfo->controlFlags = ORDER_STANDARD; orderInfo->controlFlags |= ORDER_TYPE_CHANGE; length += 1; length += PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType]; length += update_prepare_bounds(context, orderInfo); return length; } static int update_write_order_info(rdpContext* context, wStream* s, ORDER_INFO* orderInfo, size_t offset) { size_t position; WINPR_UNUSED(context); position = Stream_GetPosition(s); Stream_SetPosition(s, offset); Stream_Write_UINT8(s, orderInfo->controlFlags); /* controlFlags (1 byte) */ if (orderInfo->controlFlags & ORDER_TYPE_CHANGE) Stream_Write_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ update_write_field_flags(s, orderInfo->fieldFlags, orderInfo->controlFlags, PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderInfo->orderType]); update_write_bounds(s, orderInfo); Stream_SetPosition(s, position); return 0; } static void update_write_refresh_rect(wStream* s, BYTE count, const RECTANGLE_16* areas) { int i; Stream_Write_UINT8(s, count); /* numberOfAreas (1 byte) */ Stream_Seek(s, 3); /* pad3Octets (3 bytes) */ for (i = 0; i < count; i++) { Stream_Write_UINT16(s, areas[i].left); /* left (2 bytes) */ Stream_Write_UINT16(s, areas[i].top); /* top (2 bytes) */ Stream_Write_UINT16(s, areas[i].right); /* right (2 bytes) */ Stream_Write_UINT16(s, areas[i].bottom); /* bottom (2 bytes) */ } } static BOOL update_send_refresh_rect(rdpContext* context, BYTE count, const RECTANGLE_16* areas) { rdpRdp* rdp = context->rdp; if (rdp->settings->RefreshRect) { wStream* s = rdp_data_pdu_init(rdp); if (!s) return FALSE; update_write_refresh_rect(s, count, areas); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_REFRESH_RECT, rdp->mcs->userId); } return TRUE; } static void update_write_suppress_output(wStream* s, BYTE allow, const RECTANGLE_16* area) { Stream_Write_UINT8(s, allow); /* allowDisplayUpdates (1 byte) */ /* Use zeros for padding (like mstsc) for compatibility with legacy servers */ Stream_Zero(s, 3); /* pad3Octets (3 bytes) */ if (allow > 0) { Stream_Write_UINT16(s, area->left); /* left (2 bytes) */ Stream_Write_UINT16(s, area->top); /* top (2 bytes) */ Stream_Write_UINT16(s, area->right); /* right (2 bytes) */ Stream_Write_UINT16(s, area->bottom); /* bottom (2 bytes) */ } } 
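/*
 * Worked example for the header sizing in update_prepare_order_info and
 * update_prepare_bounds above: 1 byte of controlFlags plus 1 byte of orderType
 * (ORDER_TYPE_CHANGE is always set here), then
 * PRIMARY_DRAWING_ORDER_FIELD_BYTES[orderType] bytes of field flags, then the
 * bounds bytes. Bounds cost nothing when the current bounds are null or equal
 * to the previous bounds; otherwise they cost 1 byte of boundsFlags plus
 * 2 bytes per edge that differs from previousBounds. So an order with 2
 * field-flag bytes whose bounds changed only on the left and top edges needs
 * 1 + 1 + 2 + (1 + 2 + 2) = 9 header bytes before the order body.
 */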
static BOOL update_send_suppress_output(rdpContext* context, BYTE allow, const RECTANGLE_16* area) { rdpRdp* rdp = context->rdp; if (rdp->settings->SuppressOutput) { wStream* s = rdp_data_pdu_init(rdp); if (!s) return FALSE; update_write_suppress_output(s, allow, area); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SUPPRESS_OUTPUT, rdp->mcs->userId); } return TRUE; } static BOOL update_send_surface_command(rdpContext* context, wStream* s) { wStream* update; rdpRdp* rdp = context->rdp; BOOL ret; update = fastpath_update_pdu_init(rdp->fastpath); if (!update) return FALSE; if (!Stream_EnsureRemainingCapacity(update, Stream_GetPosition(s))) { ret = FALSE; goto out; } Stream_Write(update, Stream_Buffer(s), Stream_GetPosition(s)); ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, update, FALSE); out: Stream_Release(update); return ret; } static BOOL update_send_surface_bits(rdpContext* context, const SURFACE_BITS_COMMAND* surfaceBitsCommand) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; update_force_flush(context); s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_surfcmd_surface_bits(s, surfaceBitsCommand)) goto out_fail; if (!fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s, surfaceBitsCommand->skipCompression)) goto out_fail; update_force_flush(context); ret = TRUE; out_fail: Stream_Release(s); return ret; } static BOOL update_send_surface_frame_marker(rdpContext* context, const SURFACE_FRAME_MARKER* surfaceFrameMarker) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; update_force_flush(context); s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_surfcmd_frame_marker(s, surfaceFrameMarker->frameAction, surfaceFrameMarker->frameId) || !fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s, FALSE)) goto out_fail; update_force_flush(context); ret = TRUE; out_fail: Stream_Release(s); return ret; } static BOOL update_send_surface_frame_bits(rdpContext* context, const SURFACE_BITS_COMMAND* cmd, BOOL first, BOOL last, UINT32 frameId) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; update_force_flush(context); s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (first) { if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_BEGIN, frameId)) goto out_fail; } if (!update_write_surfcmd_surface_bits(s, cmd)) goto out_fail; if (last) { if (!update_write_surfcmd_frame_marker(s, SURFACECMD_FRAMEACTION_END, frameId)) goto out_fail; } ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SURFCMDS, s, cmd->skipCompression); update_force_flush(context); out_fail: Stream_Release(s); return ret; } static BOOL update_send_frame_acknowledge(rdpContext* context, UINT32 frameId) { rdpRdp* rdp = context->rdp; if (rdp->settings->ReceivedCapabilities[CAPSET_TYPE_FRAME_ACKNOWLEDGE]) { wStream* s = rdp_data_pdu_init(rdp); if (!s) return FALSE; Stream_Write_UINT32(s, frameId); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_FRAME_ACKNOWLEDGE, rdp->mcs->userId); } return TRUE; } static BOOL update_send_synchronize(rdpContext* context) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; Stream_Zero(s, 2); /* pad2Octets (2 bytes) */ ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_SYNCHRONIZE, s, FALSE); Stream_Release(s); return ret; } static BOOL update_send_desktop_resize(rdpContext* context) { return 
rdp_server_reactivate(context->rdp); } static BOOL update_send_bitmap_update(rdpContext* context, const BITMAP_UPDATE* bitmapUpdate) { wStream* s; rdpRdp* rdp = context->rdp; rdpUpdate* update = context->update; BOOL ret = TRUE; update_force_flush(context); s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_bitmap_update(update, s, bitmapUpdate) || !fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_BITMAP, s, bitmapUpdate->skipCompression)) { ret = FALSE; goto out_fail; } update_force_flush(context); out_fail: Stream_Release(s); return ret; } static BOOL update_send_play_sound(rdpContext* context, const PLAY_SOUND_UPDATE* play_sound) { wStream* s; rdpRdp* rdp = context->rdp; if (!rdp->settings->ReceivedCapabilities[CAPSET_TYPE_SOUND]) { return TRUE; } s = rdp_data_pdu_init(rdp); if (!s) return FALSE; Stream_Write_UINT32(s, play_sound->duration); Stream_Write_UINT32(s, play_sound->frequency); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_PLAY_SOUND, rdp->mcs->userId); } /** * Primary Drawing Orders */ static BOOL update_send_dstblt(rdpContext* context, const DSTBLT_ORDER* dstblt) { wStream* s; UINT32 offset; UINT32 headerLength; ORDER_INFO orderInfo; int inf; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_DSTBLT); inf = update_approximate_dstblt_order(&orderInfo, dstblt); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_dstblt_order(s, &orderInfo, dstblt)) return FALSE; update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_patblt(rdpContext* context, PATBLT_ORDER* patblt) { wStream* s; size_t offset; int headerLength; ORDER_INFO orderInfo; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_PATBLT); update_check_flush(context, headerLength + update_approximate_patblt_order(&orderInfo, patblt)); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_patblt_order(s, &orderInfo, patblt); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_scrblt(rdpContext* context, const SCRBLT_ORDER* scrblt) { wStream* s; UINT32 offset; UINT32 headerLength; ORDER_INFO orderInfo; int inf; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_SCRBLT); inf = update_approximate_scrblt_order(&orderInfo, scrblt); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return TRUE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_scrblt_order(s, &orderInfo, scrblt); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_opaque_rect(rdpContext* context, const OPAQUE_RECT_ORDER* opaque_rect) { wStream* s; size_t offset; int headerLength; ORDER_INFO orderInfo; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_OPAQUE_RECT); update_check_flush(context, headerLength + update_approximate_opaque_rect_order(&orderInfo, opaque_rect)); s = 
update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_opaque_rect_order(s, &orderInfo, opaque_rect); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_line_to(rdpContext* context, const LINE_TO_ORDER* line_to) { wStream* s; int offset; int headerLength; ORDER_INFO orderInfo; int inf; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_LINE_TO); inf = update_approximate_line_to_order(&orderInfo, line_to); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_line_to_order(s, &orderInfo, line_to); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_memblt(rdpContext* context, MEMBLT_ORDER* memblt) { wStream* s; size_t offset; int headerLength; ORDER_INFO orderInfo; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_MEMBLT); update_check_flush(context, headerLength + update_approximate_memblt_order(&orderInfo, memblt)); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_memblt_order(s, &orderInfo, memblt); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } static BOOL update_send_glyph_index(rdpContext* context, GLYPH_INDEX_ORDER* glyph_index) { wStream* s; size_t offset; int headerLength; int inf; ORDER_INFO orderInfo; rdpUpdate* update = context->update; headerLength = update_prepare_order_info(context, &orderInfo, ORDER_TYPE_GLYPH_INDEX); inf = update_approximate_glyph_index_order(&orderInfo, glyph_index); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; offset = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); update_write_glyph_index_order(s, &orderInfo, glyph_index); update_write_order_info(context, s, &orderInfo, offset); update->numberOrders++; return TRUE; } /* * Secondary Drawing Orders */ static BOOL update_send_cache_bitmap(rdpContext* context, const CACHE_BITMAP_ORDER* cache_bitmap) { wStream* s; size_t bm, em; BYTE orderType; int headerLength; int inf; UINT16 extraFlags; INT16 orderLength; rdpUpdate* update = context->update; extraFlags = 0; headerLength = 6; orderType = cache_bitmap->compressed ? 
ORDER_TYPE_CACHE_BITMAP_COMPRESSED : ORDER_TYPE_BITMAP_UNCOMPRESSED; inf = update_approximate_cache_bitmap_order(cache_bitmap, cache_bitmap->compressed, &extraFlags); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_bitmap_order(s, cache_bitmap, cache_bitmap->compressed, &extraFlags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, orderType); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_bitmap_v2(rdpContext* context, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2) { wStream* s; size_t bm, em; BYTE orderType; int headerLength; UINT16 extraFlags; INT16 orderLength; rdpUpdate* update = context->update; extraFlags = 0; headerLength = 6; orderType = cache_bitmap_v2->compressed ? ORDER_TYPE_BITMAP_COMPRESSED_V2 : ORDER_TYPE_BITMAP_UNCOMPRESSED_V2; if (context->settings->NoBitmapCompressionHeader) cache_bitmap_v2->flags |= CBR2_NO_BITMAP_COMPRESSION_HDR; update_check_flush(context, headerLength + update_approximate_cache_bitmap_v2_order( cache_bitmap_v2, cache_bitmap_v2->compressed, &extraFlags)); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_bitmap_v2_order(s, cache_bitmap_v2, cache_bitmap_v2->compressed, &extraFlags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, orderType); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_bitmap_v3(rdpContext* context, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3) { wStream* s; size_t bm, em; BYTE orderType; int headerLength; UINT16 extraFlags; INT16 orderLength; rdpUpdate* update = context->update; extraFlags = 0; headerLength = 6; orderType = ORDER_TYPE_BITMAP_COMPRESSED_V3; update_check_flush(context, headerLength + update_approximate_cache_bitmap_v3_order( cache_bitmap_v3, &extraFlags)); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_bitmap_v3_order(s, cache_bitmap_v3, &extraFlags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, orderType); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_color_table(rdpContext* context, const CACHE_COLOR_TABLE_ORDER* cache_color_table) { wStream* s; UINT16 flags; size_t bm, em, inf; int headerLength; INT16 
orderLength; rdpUpdate* update = context->update; flags = 0; headerLength = 6; inf = update_approximate_cache_color_table_order(cache_color_table, &flags); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_color_table_order(s, cache_color_table, &flags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, ORDER_TYPE_CACHE_COLOR_TABLE); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_glyph(rdpContext* context, const CACHE_GLYPH_ORDER* cache_glyph) { wStream* s; UINT16 flags; size_t bm, em, inf; int headerLength; INT16 orderLength; rdpUpdate* update = context->update; flags = 0; headerLength = 6; inf = update_approximate_cache_glyph_order(cache_glyph, &flags); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_glyph_order(s, cache_glyph, &flags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_glyph_v2(rdpContext* context, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2) { wStream* s; UINT16 flags; size_t bm, em, inf; int headerLength; INT16 orderLength; rdpUpdate* update = context->update; flags = 0; headerLength = 6; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, &flags); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_glyph_v2_order(s, cache_glyph_v2, &flags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, ORDER_TYPE_CACHE_GLYPH); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_cache_brush(rdpContext* context, const CACHE_BRUSH_ORDER* cache_brush) { wStream* s; UINT16 flags; size_t bm, em, inf; int headerLength; INT16 orderLength; rdpUpdate* update = context->update; flags = 0; headerLength = 6; inf = update_approximate_cache_brush_order(cache_brush, &flags); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_cache_brush_order(s, cache_brush, 
&flags)) return FALSE; em = Stream_GetPosition(s); orderLength = (em - bm) - 13; Stream_SetPosition(s, bm); Stream_Write_UINT8(s, ORDER_STANDARD | ORDER_SECONDARY); /* controlFlags (1 byte) */ Stream_Write_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Write_UINT16(s, flags); /* extraFlags (2 bytes) */ Stream_Write_UINT8(s, ORDER_TYPE_CACHE_BRUSH); /* orderType (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } /** * Alternate Secondary Drawing Orders */ static BOOL update_send_create_offscreen_bitmap_order( rdpContext* context, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { wStream* s; size_t bm, em, inf; BYTE orderType; BYTE controlFlags; int headerLength; rdpUpdate* update = context->update; headerLength = 1; orderType = ORDER_TYPE_CREATE_OFFSCREEN_BITMAP; controlFlags = ORDER_SECONDARY | (orderType << 2); inf = update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_create_offscreen_bitmap_order(s, create_offscreen_bitmap)) return FALSE; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_switch_surface_order(rdpContext* context, const SWITCH_SURFACE_ORDER* switch_surface) { wStream* s; size_t bm, em, inf; BYTE orderType; BYTE controlFlags; int headerLength; rdpUpdate* update; if (!context || !switch_surface || !context->update) return FALSE; update = context->update; headerLength = 1; orderType = ORDER_TYPE_SWITCH_SURFACE; controlFlags = ORDER_SECONDARY | (orderType << 2); inf = update_approximate_switch_surface_order(switch_surface); update_check_flush(context, headerLength + inf); s = update->us; if (!s) return FALSE; bm = Stream_GetPosition(s); if (!Stream_EnsureRemainingCapacity(s, headerLength)) return FALSE; Stream_Seek(s, headerLength); if (!update_write_switch_surface_order(s, switch_surface)) return FALSE; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); Stream_Write_UINT8(s, controlFlags); /* controlFlags (1 byte) */ Stream_SetPosition(s, em); update->numberOrders++; return TRUE; } static BOOL update_send_pointer_system(rdpContext* context, const POINTER_SYSTEM_UPDATE* pointer_system) { wStream* s; BYTE updateCode; rdpRdp* rdp = context->rdp; BOOL ret; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (pointer_system->type == SYSPTR_NULL) updateCode = FASTPATH_UPDATETYPE_PTR_NULL; else updateCode = FASTPATH_UPDATETYPE_PTR_DEFAULT; ret = fastpath_send_update_pdu(rdp->fastpath, updateCode, s, FALSE); Stream_Release(s); return ret; } static BOOL update_send_pointer_position(rdpContext* context, const POINTER_POSITION_UPDATE* pointerPosition) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, 16)) goto out_fail; Stream_Write_UINT16(s, pointerPosition->xPos); /* xPos (2 bytes) */ Stream_Write_UINT16(s, pointerPosition->yPos); /* yPos (2 bytes) */ ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_PTR_POSITION, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_write_pointer_color(wStream* s, const POINTER_COLOR_UPDATE* pointer_color) { if 
(!Stream_EnsureRemainingCapacity(s, 32 + pointer_color->lengthAndMask + pointer_color->lengthXorMask)) return FALSE; Stream_Write_UINT16(s, pointer_color->cacheIndex); Stream_Write_UINT16(s, pointer_color->xPos); Stream_Write_UINT16(s, pointer_color->yPos); Stream_Write_UINT16(s, pointer_color->width); Stream_Write_UINT16(s, pointer_color->height); Stream_Write_UINT16(s, pointer_color->lengthAndMask); Stream_Write_UINT16(s, pointer_color->lengthXorMask); if (pointer_color->lengthXorMask > 0) Stream_Write(s, pointer_color->xorMaskData, pointer_color->lengthXorMask); if (pointer_color->lengthAndMask > 0) Stream_Write(s, pointer_color->andMaskData, pointer_color->lengthAndMask); Stream_Write_UINT8(s, 0); /* pad (1 byte) */ return TRUE; } static BOOL update_send_pointer_color(rdpContext* context, const POINTER_COLOR_UPDATE* pointer_color) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_pointer_color(s, pointer_color)) goto out_fail; ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_COLOR, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_write_pointer_large(wStream* s, const POINTER_LARGE_UPDATE* pointer) { if (!Stream_EnsureRemainingCapacity(s, 32 + pointer->lengthAndMask + pointer->lengthXorMask)) return FALSE; Stream_Write_UINT16(s, pointer->xorBpp); Stream_Write_UINT16(s, pointer->cacheIndex); Stream_Write_UINT16(s, pointer->hotSpotX); Stream_Write_UINT16(s, pointer->hotSpotY); Stream_Write_UINT16(s, pointer->width); Stream_Write_UINT16(s, pointer->height); Stream_Write_UINT32(s, pointer->lengthAndMask); Stream_Write_UINT32(s, pointer->lengthXorMask); Stream_Write(s, pointer->xorMaskData, pointer->lengthXorMask); Stream_Write(s, pointer->andMaskData, pointer->lengthAndMask); Stream_Write_UINT8(s, 0); /* pad (1 byte) */ return TRUE; } static BOOL update_send_pointer_large(rdpContext* context, const POINTER_LARGE_UPDATE* pointer) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!update_write_pointer_large(s, pointer)) goto out_fail; ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_LARGE_POINTER, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_send_pointer_new(rdpContext* context, const POINTER_NEW_UPDATE* pointer_new) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret = FALSE; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, 16)) goto out_fail; Stream_Write_UINT16(s, pointer_new->xorBpp); /* xorBpp (2 bytes) */ update_write_pointer_color(s, &pointer_new->colorPtrAttr); ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_POINTER, s, FALSE); out_fail: Stream_Release(s); return ret; } static BOOL update_send_pointer_cached(rdpContext* context, const POINTER_CACHED_UPDATE* pointer_cached) { wStream* s; rdpRdp* rdp = context->rdp; BOOL ret; s = fastpath_update_pdu_init(rdp->fastpath); if (!s) return FALSE; Stream_Write_UINT16(s, pointer_cached->cacheIndex); /* cacheIndex (2 bytes) */ ret = fastpath_send_update_pdu(rdp->fastpath, FASTPATH_UPDATETYPE_CACHED, s, FALSE); Stream_Release(s); return ret; } BOOL update_read_refresh_rect(rdpUpdate* update, wStream* s) { int index; BYTE numberOfAreas; RECTANGLE_16* areas; if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT8(s, numberOfAreas); Stream_Seek(s, 3); /* pad3Octets */ if (Stream_GetRemainingLength(s) <
((size_t)numberOfAreas * 4 * 2)) return FALSE; areas = (RECTANGLE_16*)calloc(numberOfAreas, sizeof(RECTANGLE_16)); if (!areas) return FALSE; for (index = 0; index < numberOfAreas; index++) { Stream_Read_UINT16(s, areas[index].left); Stream_Read_UINT16(s, areas[index].top); Stream_Read_UINT16(s, areas[index].right); Stream_Read_UINT16(s, areas[index].bottom); } if (update->context->settings->RefreshRect) IFCALL(update->RefreshRect, update->context, numberOfAreas, areas); else WLog_Print(update->log, WLOG_WARN, "ignoring refresh rect request from client"); free(areas); return TRUE; } BOOL update_read_suppress_output(rdpUpdate* update, wStream* s) { RECTANGLE_16* prect = NULL; RECTANGLE_16 rect = { 0 }; BYTE allowDisplayUpdates; if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT8(s, allowDisplayUpdates); Stream_Seek(s, 3); /* pad3Octets */ if (allowDisplayUpdates > 0) { if (Stream_GetRemainingLength(s) < sizeof(RECTANGLE_16)) return FALSE; Stream_Read_UINT16(s, rect.left); Stream_Read_UINT16(s, rect.top); Stream_Read_UINT16(s, rect.right); Stream_Read_UINT16(s, rect.bottom); prect = &rect; } if (update->context->settings->SuppressOutput) IFCALL(update->SuppressOutput, update->context, allowDisplayUpdates, prect); else WLog_Print(update->log, WLOG_WARN, "ignoring suppress output request from client"); return TRUE; } static BOOL update_send_set_keyboard_indicators(rdpContext* context, UINT16 led_flags) { wStream* s; rdpRdp* rdp = context->rdp; s = rdp_data_pdu_init(rdp); if (!s) return FALSE; Stream_Write_UINT16(s, 0); /* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.1.1 */ Stream_Write_UINT16(s, led_flags); /* ledFlags (2 bytes) */ return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_INDICATORS, rdp->mcs->userId); } static BOOL update_send_set_keyboard_ime_status(rdpContext* context, UINT16 imeId, UINT32 imeState, UINT32 imeConvMode) { wStream* s; rdpRdp* rdp = context->rdp; s = rdp_data_pdu_init(rdp); if (!s) return FALSE; /* unitId should be 0 according to MS-RDPBCGR 2.2.8.2.2.1 */ Stream_Write_UINT16(s, imeId); Stream_Write_UINT32(s, imeState); Stream_Write_UINT32(s, imeConvMode); return rdp_send_data_pdu(rdp, s, DATA_PDU_TYPE_SET_KEYBOARD_IME_STATUS, rdp->mcs->userId); } static UINT16 update_calculate_new_or_existing_window(const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { UINT16 orderSize = 11; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0) orderSize += 2 + stateOrder->titleInfo.length; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0) orderSize += 8; if ((orderInfo->fieldFlags &
WINDOW_ORDER_FIELD_WND_RECTS) != 0) orderSize += 2 + stateOrder->numWindowRects * sizeof(RECTANGLE_16); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0) orderSize += 8; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0) orderSize += 2 + stateOrder->numVisibilityRects * sizeof(RECTANGLE_16); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0) orderSize += 2 + stateOrder->OverlayDescription.length; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0) orderSize += 1; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0) orderSize += 1; return orderSize; } static BOOL update_send_new_or_existing_window(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = update_calculate_new_or_existing_window(orderInfo, stateOrder); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OWNER) != 0) Stream_Write_UINT32(s, stateOrder->ownerWindowId); if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_STYLE) != 0) { Stream_Write_UINT32(s, stateOrder->style); Stream_Write_UINT32(s, stateOrder->extendedStyle); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_SHOW) != 0) { Stream_Write_UINT8(s, stateOrder->showState); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TITLE) != 0) { Stream_Write_UINT16(s, stateOrder->titleInfo.length); Stream_Write(s, stateOrder->titleInfo.string, stateOrder->titleInfo.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_OFFSET) != 0) { Stream_Write_INT32(s, stateOrder->clientOffsetX); Stream_Write_INT32(s, stateOrder->clientOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_CLIENT_AREA_SIZE) != 0) { Stream_Write_UINT32(s, stateOrder->clientAreaWidth); Stream_Write_UINT32(s, stateOrder->clientAreaHeight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_X) != 0) { Stream_Write_UINT32(s, stateOrder->resizeMarginLeft); Stream_Write_UINT32(s, stateOrder->resizeMarginRight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RESIZE_MARGIN_Y) != 0) { Stream_Write_UINT32(s, stateOrder->resizeMarginTop); Stream_Write_UINT32(s, stateOrder->resizeMarginBottom); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_RP_CONTENT) != 0) { Stream_Write_UINT8(s, stateOrder->RPContent); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ROOT_PARENT) != 0) { Stream_Write_UINT32(s, stateOrder->rootParentHandle); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_OFFSET) != 0) { Stream_Write_INT32(s, stateOrder->windowOffsetX); Stream_Write_INT32(s, stateOrder->windowOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_CLIENT_DELTA) != 0) { Stream_Write_INT32(s, stateOrder->windowClientDeltaX); Stream_Write_INT32(s, stateOrder->windowClientDeltaY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_SIZE) != 0) { 
Stream_Write_UINT32(s, stateOrder->windowWidth); Stream_Write_UINT32(s, stateOrder->windowHeight); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_WND_RECTS) != 0) { Stream_Write_UINT16(s, stateOrder->numWindowRects); Stream_Write(s, stateOrder->windowRects, stateOrder->numWindowRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VIS_OFFSET) != 0) { Stream_Write_UINT32(s, stateOrder->visibleOffsetX); Stream_Write_UINT32(s, stateOrder->visibleOffsetY); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_VISIBILITY) != 0) { Stream_Write_UINT16(s, stateOrder->numVisibilityRects); Stream_Write(s, stateOrder->visibilityRects, stateOrder->numVisibilityRects * sizeof(RECTANGLE_16)); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_OVERLAY_DESCRIPTION) != 0) { Stream_Write_UINT16(s, stateOrder->OverlayDescription.length); Stream_Write(s, stateOrder->OverlayDescription.string, stateOrder->OverlayDescription.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_TASKBAR_BUTTON) != 0) { Stream_Write_UINT8(s, stateOrder->TaskbarButton); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_ENFORCE_SERVER_ZORDER) != 0) { Stream_Write_UINT8(s, stateOrder->EnforceServerZOrder); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_STATE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarState); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_APPBAR_EDGE) != 0) { Stream_Write_UINT8(s, stateOrder->AppBarEdge); } update->numberOrders++; return TRUE; } static BOOL update_send_window_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static BOOL update_send_window_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_STATE_ORDER* stateOrder) { return update_send_new_or_existing_window(context, orderInfo, stateOrder); } static UINT16 update_calculate_window_icon_order(const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { UINT16 orderSize = 23; ICON_INFO* iconInfo = iconOrder->iconInfo; orderSize += iconInfo->cbBitsColor + iconInfo->cbBitsMask; if (iconInfo->bpp <= 8) orderSize += 2 + iconInfo->cbColorTable; return orderSize; } static BOOL update_send_window_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_ICON_ORDER* iconOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); ICON_INFO* iconInfo = iconOrder->iconInfo; UINT16 orderSize = update_calculate_window_icon_order(orderInfo, iconOrder); update_check_flush(context, orderSize); s = update->us; if (!s || !iconInfo) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ /* Write body */ Stream_Write_UINT16(s, iconInfo->cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo->cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo->bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo->width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo->height); /* Height (2 bytes) */ if (iconInfo->bpp <= 8) { Stream_Write_UINT16(s, iconInfo->cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo->cbBitsMask); /* 
CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo->cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo->bitsMask, iconInfo->cbBitsMask); /* BitsMask (variable) */ if (iconInfo->bpp <= 8) { Stream_Write(s, iconInfo->colorTable, iconInfo->cbColorTable); /* ColorTable (variable) */ } Stream_Write(s, iconInfo->bitsColor, iconInfo->cbBitsColor); /* BitsColor (variable) */ update->numberOrders++; return TRUE; } static BOOL update_send_window_cached_icon(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const WINDOW_CACHED_ICON_ORDER* cachedIconOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 14; CACHED_ICON_INFO cachedIcon = cachedIconOrder->cachedIcon; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ /* Write body */ Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, cachedIcon.cacheId); /* CacheId (1 byte) */ update->numberOrders++; return TRUE; } static BOOL update_send_window_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 11; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ update->numberOrders++; return TRUE; } static UINT16 update_calculate_new_or_existing_notification_icons_order( const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { UINT16 orderSize = 15; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) orderSize += 4; if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { orderSize += 2 + iconStateOrder->toolTip.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; orderSize += 12 + infoTip.text.length + infoTip.title.length; } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { orderSize += 4; } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; orderSize += 12; if (iconInfo.bpp <= 8) orderSize += 2 + iconInfo.cbColorTable; orderSize += iconInfo.cbBitsMask + iconInfo.cbBitsColor; } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { orderSize += 3; } return orderSize; } static BOOL update_send_new_or_existing_notification_icons(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); BOOL versionFieldPresent = FALSE; UINT16 orderSize = update_calculate_new_or_existing_notification_icons_order(orderInfo, iconStateOrder); 
update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; if (!Stream_EnsureRemainingCapacity(s, orderSize)) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ /* Write body */ if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_VERSION) != 0) { versionFieldPresent = TRUE; Stream_Write_UINT32(s, iconStateOrder->version); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_TIP) != 0) { Stream_Write_UINT16(s, iconStateOrder->toolTip.length); Stream_Write(s, iconStateOrder->toolTip.string, iconStateOrder->toolTip.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_INFO_TIP) != 0) { NOTIFY_ICON_INFOTIP infoTip = iconStateOrder->infoTip; /* info tip should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, infoTip.timeout); /* Timeout (4 bytes) */ Stream_Write_UINT32(s, infoTip.flags); /* InfoFlags (4 bytes) */ Stream_Write_UINT16(s, infoTip.text.length); /* InfoTipText (variable) */ Stream_Write(s, infoTip.text.string, infoTip.text.length); Stream_Write_UINT16(s, infoTip.title.length); /* Title (variable) */ Stream_Write(s, infoTip.title.string, infoTip.title.length); } if ((orderInfo->fieldFlags & WINDOW_ORDER_FIELD_NOTIFY_STATE) != 0) { /* notify state should not be sent when version is 0 */ if (versionFieldPresent && iconStateOrder->version == 0) return FALSE; Stream_Write_UINT32(s, iconStateOrder->state); } if ((orderInfo->fieldFlags & WINDOW_ORDER_ICON) != 0) { ICON_INFO iconInfo = iconStateOrder->icon; Stream_Write_UINT16(s, iconInfo.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, iconInfo.cacheId); /* CacheId (1 byte) */ Stream_Write_UINT8(s, iconInfo.bpp); /* Bpp (1 byte) */ Stream_Write_UINT16(s, iconInfo.width); /* Width (2 bytes) */ Stream_Write_UINT16(s, iconInfo.height); /* Height (2 bytes) */ if (iconInfo.bpp <= 8) { Stream_Write_UINT16(s, iconInfo.cbColorTable); /* CbColorTable (2 bytes) */ } Stream_Write_UINT16(s, iconInfo.cbBitsMask); /* CbBitsMask (2 bytes) */ Stream_Write_UINT16(s, iconInfo.cbBitsColor); /* CbBitsColor (2 bytes) */ Stream_Write(s, iconInfo.bitsMask, iconInfo.cbBitsMask); /* BitsMask (variable) */ if (iconInfo.bpp <= 8) { Stream_Write(s, iconInfo.colorTable, iconInfo.cbColorTable); /* ColorTable (variable) */ } Stream_Write(s, iconInfo.bitsColor, iconInfo.cbBitsColor); /* BitsColor (variable) */ } else if ((orderInfo->fieldFlags & WINDOW_ORDER_CACHED_ICON) != 0) { CACHED_ICON_INFO cachedIcon = iconStateOrder->cachedIcon; Stream_Write_UINT16(s, cachedIcon.cacheEntry); /* CacheEntry (2 bytes) */ Stream_Write_UINT8(s, cachedIcon.cacheId); /* CacheId (1 byte) */ } update->numberOrders++; return TRUE; } static BOOL update_send_notify_icon_create(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); } static BOOL update_send_notify_icon_update(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const NOTIFY_ICON_STATE_ORDER* iconStateOrder) { return update_send_new_or_existing_notification_icons(context, orderInfo, iconStateOrder); }
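/*
 * The update_calculate_* helpers and their update_send_* counterparts must stay
 * in lockstep: OrderSize is written into the header before the body, so every
 * field the writer emits has to be pre-counted. Worked example for the
 * notification icon order above: the fixed part is 15 bytes (1 Header +
 * 2 OrderSize + 4 FieldsPresentFlags + 4 WindowID + 4 NotifyIconId); with only
 * WINDOW_ORDER_FIELD_NOTIFY_VERSION set, the 4-byte version field brings the
 * total to 19 bytes, which is exactly what the writer produces.
 */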
static BOOL update_send_notify_icon_delete(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 15; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; /* Write Hdr */ Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ Stream_Write_UINT32(s, orderInfo->windowId); /* WindowID (4 bytes) */ Stream_Write_UINT32(s, orderInfo->notifyIconId); /* NotifyIconId (4 bytes) */ update->numberOrders++; return TRUE; } static UINT16 update_calculate_monitored_desktop(const WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT16 orderSize = 7; if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { orderSize += 4; } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { orderSize += 1 + (4 * monitoredDesktop->numWindowIds); } return orderSize; } static BOOL update_send_monitored_desktop(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo, const MONITORED_DESKTOP_ORDER* monitoredDesktop) { UINT32 i; wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = update_calculate_monitored_desktop(orderInfo, monitoredDesktop); update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ACTIVE_WND) { Stream_Write_UINT32(s, monitoredDesktop->activeWindowId); /* activeWindowId (4 bytes) */ } if (orderInfo->fieldFlags & WINDOW_ORDER_FIELD_DESKTOP_ZORDER) { Stream_Write_UINT8(s, monitoredDesktop->numWindowIds); /* numWindowIds (1 byte) */ /* windowIds */ for (i = 0; i < monitoredDesktop->numWindowIds; i++) { Stream_Write_UINT32(s, monitoredDesktop->windowIds[i]); } } update->numberOrders++; return TRUE; } static BOOL update_send_non_monitored_desktop(rdpContext* context, const WINDOW_ORDER_INFO* orderInfo) { wStream* s; rdpUpdate* update = context->update; BYTE controlFlags = ORDER_SECONDARY | (ORDER_TYPE_WINDOW << 2); UINT16 orderSize = 7; update_check_flush(context, orderSize); s = update->us; if (!s) return FALSE; Stream_Write_UINT8(s, controlFlags); /* Header (1 byte) */ Stream_Write_UINT16(s, orderSize); /* OrderSize (2 bytes) */ Stream_Write_UINT32(s, orderInfo->fieldFlags); /* FieldsPresentFlags (4 bytes) */ update->numberOrders++; return TRUE; } void update_register_server_callbacks(rdpUpdate* update) { update->BeginPaint = _update_begin_paint; update->EndPaint = _update_end_paint; update->SetBounds = update_set_bounds; update->Synchronize = update_send_synchronize; update->DesktopResize = update_send_desktop_resize; update->BitmapUpdate = update_send_bitmap_update; update->SurfaceBits = update_send_surface_bits; update->SurfaceFrameMarker = update_send_surface_frame_marker; update->SurfaceCommand = update_send_surface_command; update->SurfaceFrameBits = update_send_surface_frame_bits; update->PlaySound = update_send_play_sound; update->SetKeyboardIndicators = update_send_set_keyboard_indicators; update->SetKeyboardImeStatus = update_send_set_keyboard_ime_status; update->SaveSessionInfo = 
rdp_send_save_session_info; update->ServerStatusInfo = rdp_send_server_status_info; update->primary->DstBlt = update_send_dstblt; update->primary->PatBlt = update_send_patblt; update->primary->ScrBlt = update_send_scrblt; update->primary->OpaqueRect = update_send_opaque_rect; update->primary->LineTo = update_send_line_to; update->primary->MemBlt = update_send_memblt; update->primary->GlyphIndex = update_send_glyph_index; update->secondary->CacheBitmap = update_send_cache_bitmap; update->secondary->CacheBitmapV2 = update_send_cache_bitmap_v2; update->secondary->CacheBitmapV3 = update_send_cache_bitmap_v3; update->secondary->CacheColorTable = update_send_cache_color_table; update->secondary->CacheGlyph = update_send_cache_glyph; update->secondary->CacheGlyphV2 = update_send_cache_glyph_v2; update->secondary->CacheBrush = update_send_cache_brush; update->altsec->CreateOffscreenBitmap = update_send_create_offscreen_bitmap_order; update->altsec->SwitchSurface = update_send_switch_surface_order; update->pointer->PointerSystem = update_send_pointer_system; update->pointer->PointerPosition = update_send_pointer_position; update->pointer->PointerColor = update_send_pointer_color; update->pointer->PointerLarge = update_send_pointer_large; update->pointer->PointerNew = update_send_pointer_new; update->pointer->PointerCached = update_send_pointer_cached; update->window->WindowCreate = update_send_window_create; update->window->WindowUpdate = update_send_window_update; update->window->WindowIcon = update_send_window_icon; update->window->WindowCachedIcon = update_send_window_cached_icon; update->window->WindowDelete = update_send_window_delete; update->window->NotifyIconCreate = update_send_notify_icon_create; update->window->NotifyIconUpdate = update_send_notify_icon_update; update->window->NotifyIconDelete = update_send_notify_icon_delete; update->window->MonitoredDesktop = update_send_monitored_desktop; update->window->NonMonitoredDesktop = update_send_non_monitored_desktop; } void update_register_client_callbacks(rdpUpdate* update) { update->RefreshRect = update_send_refresh_rect; update->SuppressOutput = update_send_suppress_output; update->SurfaceFrameAcknowledge = update_send_frame_acknowledge; } int update_process_messages(rdpUpdate* update) { return update_message_queue_process_pending_messages(update); } static void update_free_queued_message(void* obj) { wMessage* msg = (wMessage*)obj; update_message_queue_free_message(msg); } void update_free_window_state(WINDOW_STATE_ORDER* window_state) { if (!window_state) return; free(window_state->OverlayDescription.string); free(window_state->titleInfo.string); free(window_state->windowRects); free(window_state->visibilityRects); memset(window_state, 0, sizeof(WINDOW_STATE_ORDER)); } rdpUpdate* update_new(rdpRdp* rdp) { const wObject cb = { NULL, NULL, NULL, update_free_queued_message, NULL }; rdpUpdate* update; OFFSCREEN_DELETE_LIST* deleteList; WINPR_UNUSED(rdp); update = (rdpUpdate*)calloc(1, sizeof(rdpUpdate)); if (!update) return NULL; update->log = WLog_Get("com.freerdp.core.update"); InitializeCriticalSection(&(update->mux)); update->pointer = (rdpPointerUpdate*)calloc(1, sizeof(rdpPointerUpdate)); if (!update->pointer) goto fail; update->primary = (rdpPrimaryUpdate*)calloc(1, sizeof(rdpPrimaryUpdate)); if (!update->primary) goto fail; update->secondary = (rdpSecondaryUpdate*)calloc(1, sizeof(rdpSecondaryUpdate)); if (!update->secondary) goto fail; update->altsec = (rdpAltSecUpdate*)calloc(1, sizeof(rdpAltSecUpdate)); if (!update->altsec) goto 
fail; update->window = (rdpWindowUpdate*)calloc(1, sizeof(rdpWindowUpdate)); if (!update->window) goto fail; deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); deleteList->sIndices = 64; deleteList->indices = calloc(deleteList->sIndices, 2); if (!deleteList->indices) goto fail; deleteList->cIndices = 0; update->SuppressOutput = update_send_suppress_output; update->initialState = TRUE; update->autoCalculateBitmapData = TRUE; update->queue = MessageQueue_New(&cb); if (!update->queue) goto fail; return update; fail: update_free(update); return NULL; } void update_free(rdpUpdate* update) { if (update != NULL) { /* update_new may fail before altsec is allocated, so guard the deleteList access */ if (update->altsec) { OFFSCREEN_DELETE_LIST* deleteList = &(update->altsec->create_offscreen_bitmap.deleteList); free(deleteList->indices); } free(update->pointer); if (update->primary) { free(update->primary->polyline.points); free(update->primary->polygon_sc.points); free(update->primary->fast_glyph.glyphData.aj); free(update->primary); } free(update->secondary); free(update->altsec); if (update->window) { free(update->window); } MessageQueue_Free(update->queue); DeleteCriticalSection(&update->mux); free(update); } } BOOL update_begin_paint(rdpUpdate* update) { if (!update) return FALSE; EnterCriticalSection(&update->mux); if (!update->BeginPaint) return TRUE; return update->BeginPaint(update->context); } BOOL update_end_paint(rdpUpdate* update) { BOOL rc = FALSE; if (!update) return FALSE; if (update->EndPaint) rc = update->EndPaint(update->context); LeaveCriticalSection(&update->mux); return rc; }
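/*
 * A minimal sketch of how a server normally drives the callbacks registered by
 * update_register_server_callbacks (illustrative only; draw_one_rect is a
 * hypothetical caller, and creation of the surrounding rdpContext is assumed to
 * have happened elsewhere). Orders sent between begin and end paint are
 * combined into a single fast-path PDU, with numberOrders backfilled by
 * update_end_paint.
 */
#if 0
static BOOL draw_one_rect(rdpContext* context)
{
	rdpUpdate* update = context->update;
	OPAQUE_RECT_ORDER rect = { 0 };
	rect.nLeftRect = 0;
	rect.nTopRect = 0;
	rect.nWidth = 64;
	rect.nHeight = 64;
	rect.color = 0x000000FF;

	if (!update_begin_paint(update)) /* enters update->mux, opens the PDU */
		return FALSE;

	if (!update->primary->OpaqueRect(context, &rect)) /* update_send_opaque_rect */
	{
		update_end_paint(update);
		return FALSE;
	}

	return update_end_paint(update); /* writes numberOrders, sends, unlocks */
}
#endif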
/* end of file: ./CrossVul/dataset_final_sorted/CWE-119/c/good_3908_0 */
/* crossvul-cpp_data_good_464_0 */
/* * Microsoft Advanced Streaming Format demuxer * Copyright (c) 2014 Alexandra Hájková * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/attributes.h" #include "libavutil/avstring.h" #include "libavutil/bswap.h" #include "libavutil/common.h" #include "libavutil/dict.h" #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/time_internal.h" #include "avformat.h" #include "avio_internal.h" #include "avlanguage.h" #include "id3v2.h" #include "internal.h" #include "riff.h" #include "asf.h" #include "asfcrypt.h" #define ASF_BOOL 0x2 #define ASF_WORD 0x5 #define ASF_GUID 0x6 #define ASF_DWORD 0x3 #define ASF_QWORD 0x4 #define ASF_UNICODE 0x0 #define ASF_FLAG_BROADCAST 0x1 #define ASF_BYTE_ARRAY 0x1 #define ASF_TYPE_AUDIO 0x2 #define ASF_TYPE_VIDEO 0x1 #define ASF_STREAM_NUM 0x7F #define ASF_MAX_STREAMS 128 #define BMP_HEADER_SIZE 40 #define ASF_NUM_OF_PAYLOADS 0x3F #define ASF_ERROR_CORRECTION_LENGTH_TYPE 0x60 #define ASF_PACKET_ERROR_CORRECTION_DATA_SIZE 0x2 typedef struct GUIDParseTable { const char *name; ff_asf_guid guid; int (*read_object)(AVFormatContext *, const struct GUIDParseTable *); int is_subobject; } GUIDParseTable; typedef struct ASFPacket { AVPacket avpkt; int64_t dts; uint32_t frame_num; // ASF payloads with the same number are parts of the same frame int flags; int data_size; int duration; int size_left; uint8_t stream_index; } ASFPacket; typedef struct ASFStream { uint8_t stream_index; // from packet header int index; // stream index in AVFormatContext, set in asf_read_stream_properties int type; int indexed; // added index entries from the Simple Index Object or not int8_t span; // for deinterleaving uint16_t virtual_pkt_len; uint16_t virtual_chunk_len; int16_t lang_idx; ASFPacket pkt; } ASFStream; typedef struct ASFStreamData{ char langs[32]; AVDictionary *asf_met; // for storing per-stream metadata AVRational aspect_ratio; } ASFStreamData; typedef struct ASFContext { int data_reached; int is_simple_index; // is simple index present or not 1/0 int is_header; uint64_t preroll; uint64_t nb_packets; // ASF packets uint32_t packet_size; int64_t send_time; int duration; uint32_t b_flags; // flags with broadcast flag uint32_t prop_flags; // file properties object flags uint64_t data_size; // data object size uint64_t unknown_size; // size of the unknown object int64_t offset; // offset of the current object int64_t data_offset; int64_t first_packet_offset; // packet offset int64_t unknown_offset; // for top level header objects or subobjects without specified behavior // ASF file must not contain more than 128 streams according to the specification ASFStream *asf_st[ASF_MAX_STREAMS]; ASFStreamData asf_sd[ASF_MAX_STREAMS]; int nb_streams; int stream_index; // from packet header, for the subpayload case // packet 
parameters uint64_t sub_header_offset; // offset of subpayload header int64_t sub_dts; uint8_t dts_delta; // for subpayloads uint32_t packet_size_internal; // packet size stored inside ASFPacket, can be 0 int64_t packet_offset; // offset of the current packet inside Data Object uint32_t pad_len; // padding after payload uint32_t rep_data_len; // packet state uint64_t sub_left; // subpayloads left or not unsigned int nb_sub; // number of subpayloads read so far from the current ASF packet uint16_t mult_sub_len; // total length of subpayloads array inside multiple payload uint64_t nb_mult_left; // multiple payloads left int return_subpayload; enum { PARSE_PACKET_HEADER, READ_SINGLE, READ_MULTI, READ_MULTI_SUB } state; } ASFContext; static int detect_unknown_subobject(AVFormatContext *s, int64_t offset, int64_t size); static const GUIDParseTable *find_guid(ff_asf_guid guid); static int asf_probe(AVProbeData *pd) { /* check file header */ if (!ff_guidcmp(pd->buf, &ff_asf_header)) return AVPROBE_SCORE_MAX/2; else return 0; } static void swap_guid(ff_asf_guid guid) { FFSWAP(unsigned char, guid[0], guid[3]); FFSWAP(unsigned char, guid[1], guid[2]); FFSWAP(unsigned char, guid[4], guid[5]); FFSWAP(unsigned char, guid[6], guid[7]); } static void align_position(AVIOContext *pb, int64_t offset, uint64_t size) { if (size < INT64_MAX - offset && avio_tell(pb) != offset + size) avio_seek(pb, offset + size, SEEK_SET); } static int asf_read_unknown(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); int ret; if (size > INT64_MAX) return AVERROR_INVALIDDATA; if (asf->is_header) asf->unknown_size = size; asf->is_header = 0; if (!g->is_subobject) { if (!(ret = strcmp(g->name, "Header Extension"))) avio_skip(pb, 22); // skip reserved fields and Data Size if ((ret = detect_unknown_subobject(s, asf->unknown_offset, asf->unknown_size)) < 0) return ret; } else { if (size < 24) { av_log(s, AV_LOG_ERROR, "Too small size %"PRIu64" (< 24).\n", size); return AVERROR_INVALIDDATA; } avio_skip(pb, size - 24); } return 0; } static int get_asf_string(AVIOContext *pb, int maxlen, char *buf, int buflen) { char *q = buf; int ret = 0; if (buflen <= 0) return AVERROR(EINVAL); while (ret + 1 < maxlen) { uint8_t tmp; uint32_t ch; GET_UTF16(ch, (ret += 2) <= maxlen ? 
avio_rl16(pb) : 0, break;); PUT_UTF8(ch, tmp, if (q - buf < buflen - 1) *q++ = tmp;) } *q = 0; return ret; } static int asf_read_marker(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); int i, nb_markers, ret; size_t len; char name[1024]; avio_skip(pb, 8); avio_skip(pb, 8); // skip reserved GUID nb_markers = avio_rl32(pb); avio_skip(pb, 2); // skip reserved field len = avio_rl16(pb); for (i = 0; i < len; i++) avio_skip(pb, 1); for (i = 0; i < nb_markers; i++) { int64_t pts; avio_skip(pb, 8); pts = avio_rl64(pb); pts -= asf->preroll * 10000; avio_skip(pb, 2); // entry length avio_skip(pb, 4); // send time avio_skip(pb, 4); // flags len = avio_rl32(pb); if ((ret = avio_get_str16le(pb, len, name, sizeof(name))) < len) avio_skip(pb, len - ret); avpriv_new_chapter(s, i, (AVRational) { 1, 10000000 }, pts, AV_NOPTS_VALUE, name); } align_position(pb, asf->offset, size); return 0; } static int asf_read_metadata(AVFormatContext *s, const char *title, uint16_t len, unsigned char *ch, uint16_t buflen) { AVIOContext *pb = s->pb; avio_get_str16le(pb, len, ch, buflen); if (ch[0]) { if (av_dict_set(&s->metadata, title, ch, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } return 0; } static int asf_read_value(AVFormatContext *s, const uint8_t *name, uint16_t val_len, int type, AVDictionary **met) { int ret; uint8_t *value; uint16_t buflen = 2 * val_len + 1; AVIOContext *pb = s->pb; value = av_malloc(buflen); if (!value) return AVERROR(ENOMEM); if (type == ASF_UNICODE) { // get_asf_string reads UTF-16 and converts it to UTF-8 which needs longer buffer if ((ret = get_asf_string(pb, val_len, value, buflen)) < 0) goto failed; if (av_dict_set(met, name, value, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } else { char buf[256]; if (val_len > sizeof(buf)) { ret = AVERROR_INVALIDDATA; goto failed; } if ((ret = avio_read(pb, value, val_len)) < 0) goto failed; if (ret < 2 * val_len) value[ret] = '\0'; else value[2 * val_len - 1] = '\0'; snprintf(buf, sizeof(buf), "%s", value); if (av_dict_set(met, name, buf, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } av_freep(&value); return 0; failed: av_freep(&value); return ret; } static int asf_read_generic_value(AVIOContext *pb, int type, uint64_t *value) { switch (type) { case ASF_BOOL: *value = avio_rl16(pb); break; case ASF_DWORD: *value = avio_rl32(pb); break; case ASF_QWORD: *value = avio_rl64(pb); break; case ASF_WORD: *value = avio_rl16(pb); break; default: return AVERROR_INVALIDDATA; } return 0; } static int asf_set_metadata(AVFormatContext *s, const uint8_t *name, int type, AVDictionary **met) { AVIOContext *pb = s->pb; uint64_t value; char buf[32]; int ret; ret = asf_read_generic_value(pb, type, &value); if (ret < 0) return ret; snprintf(buf, sizeof(buf), "%"PRIu64, value); if (av_dict_set(met, name, buf, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); return 0; } /* MSDN claims that this should be "compatible with the ID3 frame, APIC", * but in reality this is only loosely similar */ static int asf_read_picture(AVFormatContext *s, int len) { ASFContext *asf = s->priv_data; AVPacket pkt = { 0 }; const CodecMime *mime = ff_id3v2_mime_tags; enum AVCodecID id = AV_CODEC_ID_NONE; char mimetype[64]; uint8_t *desc = NULL; AVStream *st = NULL; int ret, type, picsize, desc_len; ASFStream *asf_st; /* type + picsize + mime + desc */ if (len < 1 + 4 + 2 + 2) { av_log(s, AV_LOG_ERROR, "Invalid attached picture size: %d.\n", len); return 
AVERROR_INVALIDDATA; } /* picture type */ type = avio_r8(s->pb); len--; if (type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types) || type < 0) { av_log(s, AV_LOG_WARNING, "Unknown attached picture type: %d.\n", type); type = 0; } /* picture data size */ picsize = avio_rl32(s->pb); len -= 4; /* picture MIME type */ len -= avio_get_str16le(s->pb, len, mimetype, sizeof(mimetype)); while (mime->id != AV_CODEC_ID_NONE) { if (!strncmp(mime->str, mimetype, sizeof(mimetype))) { id = mime->id; break; } mime++; } if (id == AV_CODEC_ID_NONE) { av_log(s, AV_LOG_ERROR, "Unknown attached picture mimetype: %s.\n", mimetype); return 0; } if (picsize >= len) { av_log(s, AV_LOG_ERROR, "Invalid attached picture data size: %d >= %d.\n", picsize, len); return AVERROR_INVALIDDATA; } /* picture description */ desc_len = (len - picsize) * 2 + 1; desc = av_malloc(desc_len); if (!desc) return AVERROR(ENOMEM); len -= avio_get_str16le(s->pb, len - picsize, desc, desc_len); ret = av_get_packet(s->pb, &pkt, picsize); if (ret < 0) goto fail; st = avformat_new_stream(s, NULL); if (!st) { ret = AVERROR(ENOMEM); goto fail; } asf->asf_st[asf->nb_streams] = av_mallocz(sizeof(*asf_st)); asf_st = asf->asf_st[asf->nb_streams]; if (!asf_st) { ret = AVERROR(ENOMEM); goto fail; } st->disposition |= AV_DISPOSITION_ATTACHED_PIC; st->codecpar->codec_type = asf_st->type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = id; st->attached_pic = pkt; st->attached_pic.stream_index = asf_st->index = st->index; st->attached_pic.flags |= AV_PKT_FLAG_KEY; asf->nb_streams++; if (*desc) { if (av_dict_set(&st->metadata, "title", desc, AV_DICT_DONT_STRDUP_VAL) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } else av_freep(&desc); if (av_dict_set(&st->metadata, "comment", ff_id3v2_picture_types[type], 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); return 0; fail: av_freep(&desc); av_packet_unref(&pkt); return ret; } static void get_id3_tag(AVFormatContext *s, int len) { ID3v2ExtraMeta *id3v2_extra_meta = NULL; ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, len); if (id3v2_extra_meta) { ff_id3v2_parse_apic(s, &id3v2_extra_meta); ff_id3v2_parse_chapters(s, &id3v2_extra_meta); } ff_id3v2_free_extra_meta(&id3v2_extra_meta); } static int process_metadata(AVFormatContext *s, const uint8_t *name, uint16_t name_len, uint16_t val_len, uint16_t type, AVDictionary **met) { int ret; ff_asf_guid guid; if (val_len) { switch (type) { case ASF_UNICODE: asf_read_value(s, name, val_len, type, met); break; case ASF_BYTE_ARRAY: if (!strcmp(name, "WM/Picture")) // handle cover art asf_read_picture(s, val_len); else if (!strcmp(name, "ID3")) // handle ID3 tag get_id3_tag(s, val_len); else asf_read_value(s, name, val_len, type, met); break; case ASF_GUID: ff_get_guid(s->pb, &guid); break; default: if ((ret = asf_set_metadata(s, name, type, met)) < 0) return ret; break; } } return 0; } static int asf_read_ext_content(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); uint16_t nb_desc = avio_rl16(pb); int i, ret; for (i = 0; i < nb_desc; i++) { uint16_t name_len, type, val_len; uint8_t *name = NULL; name_len = avio_rl16(pb); if (!name_len) return AVERROR_INVALIDDATA; name = av_malloc(name_len); if (!name) return AVERROR(ENOMEM); avio_get_str16le(pb, name_len, name, name_len); type = avio_rl16(pb); // BOOL values are 16 bits long in the Metadata Object // but 32 bits long in the Extended Content Description Object if (type == ASF_BOOL) type = ASF_DWORD; val_len = 
avio_rl16(pb); ret = process_metadata(s, name, name_len, val_len, type, &s->metadata); av_freep(&name); if (ret < 0) return ret; } align_position(pb, asf->offset, size); return 0; } static AVStream *find_stream(AVFormatContext *s, uint16_t st_num) { AVStream *st = NULL; ASFContext *asf = s->priv_data; int i; for (i = 0; i < asf->nb_streams; i++) { if (asf->asf_st[i]->stream_index == st_num) { st = s->streams[asf->asf_st[i]->index]; break; } } return st; } static int asf_store_aspect_ratio(AVFormatContext *s, uint8_t st_num, uint8_t *name, int type) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t value = 0; int ret; ret = asf_read_generic_value(pb, type, &value); if (ret < 0) return ret; if (st_num < ASF_MAX_STREAMS) { if (!strcmp(name, "AspectRatioX")) asf->asf_sd[st_num].aspect_ratio.num = value; else asf->asf_sd[st_num].aspect_ratio.den = value; } return 0; } static int asf_read_metadata_obj(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); uint16_t nb_recs = avio_rl16(pb); // number of records in the Description Records list int i, ret; for (i = 0; i < nb_recs; i++) { uint16_t name_len, buflen, type, val_len, st_num; uint8_t *name = NULL; avio_skip(pb, 2); // skip reserved field st_num = avio_rl16(pb); name_len = avio_rl16(pb); buflen = 2 * name_len + 1; if (!name_len) break; type = avio_rl16(pb); val_len = avio_rl32(pb); name = av_malloc(buflen); if (!name) return AVERROR(ENOMEM); avio_get_str16le(pb, name_len, name, buflen); if (!strcmp(name, "AspectRatioX") || !strcmp(name, "AspectRatioY")) { ret = asf_store_aspect_ratio(s, st_num, name, type); if (ret < 0) { av_freep(&name); break; } } else { if (st_num < ASF_MAX_STREAMS) { if ((ret = process_metadata(s, name, name_len, val_len, type, &asf->asf_sd[st_num].asf_met)) < 0) { av_freep(&name); break; } } } av_freep(&name); } align_position(pb, asf->offset, size); return 0; } static int asf_read_content_desc(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int i; static const char *const titles[] = { "Title", "Author", "Copyright", "Description", "Rate" }; uint16_t len[5], buflen[5] = { 0 }; uint8_t *ch; uint64_t size = avio_rl64(pb); for (i = 0; i < 5; i++) { len[i] = avio_rl16(pb); // utf8 string should be <= 2 * utf16 string, extra byte for the terminator buflen[i] = 2 * len[i] + 1; } for (i = 0; i < 5; i++) { ch = av_malloc(buflen[i]); if (!ch) return(AVERROR(ENOMEM)); asf_read_metadata(s, titles[i], len[i], ch, buflen[i]); av_freep(&ch); } align_position(pb, asf->offset, size); return 0; } static int asf_read_properties(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; time_t creation_time; avio_rl64(pb); // read object size avio_skip(pb, 16); // skip File ID avio_skip(pb, 8); // skip File size creation_time = avio_rl64(pb); if (!(asf->b_flags & ASF_FLAG_BROADCAST)) { struct tm tmbuf; struct tm *tm; char buf[64]; // creation date is in 100 ns units from 1 Jan 1601, conversion to s creation_time /= 10000000; // there are 11644473600 seconds between 1 Jan 1601 and 1 Jan 1970 creation_time -= 11644473600; tm = gmtime_r(&creation_time, &tmbuf); if (tm) { if (!strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", tm)) buf[0] = '\0'; } else buf[0] = '\0'; if (buf[0]) { if (av_dict_set(&s->metadata, "creation_time", buf, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } } asf->nb_packets = avio_rl64(pb); asf->duration = 
avio_rl64(pb) / 10000; // stream duration avio_skip(pb, 8); // skip send duration asf->preroll = avio_rl64(pb); asf->duration -= asf->preroll; asf->b_flags = avio_rl32(pb); avio_skip(pb, 4); // skip minimal packet size asf->packet_size = avio_rl32(pb); avio_skip(pb, 4); // skip max_bitrate return 0; } static int parse_video_info(AVIOContext *pb, AVStream *st) { uint16_t size_asf; // ASF-specific Format Data size uint32_t size_bmp; // BMP_HEADER-specific Format Data size unsigned int tag; st->codecpar->width = avio_rl32(pb); st->codecpar->height = avio_rl32(pb); avio_skip(pb, 1); // skip reserved flags size_asf = avio_rl16(pb); tag = ff_get_bmp_header(pb, st, &size_bmp); st->codecpar->codec_tag = tag; st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag); size_bmp = FFMAX(size_asf, size_bmp); if (size_bmp > BMP_HEADER_SIZE && size_bmp < INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) { int ret; st->codecpar->extradata_size = size_bmp - BMP_HEADER_SIZE; if (!(st->codecpar->extradata = av_malloc(st->codecpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE))) { st->codecpar->extradata_size = 0; return AVERROR(ENOMEM); } memset(st->codecpar->extradata + st->codecpar->extradata_size , 0, AV_INPUT_BUFFER_PADDING_SIZE); if ((ret = avio_read(pb, st->codecpar->extradata, st->codecpar->extradata_size)) < 0) return ret; } return 0; } static int asf_read_stream_properties(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size; uint32_t err_data_len, ts_data_len; // type specific data length uint16_t flags; ff_asf_guid stream_type; enum AVMediaType type; int i, ret; uint8_t stream_index; AVStream *st; ASFStream *asf_st; // ASF file must not contain more than 128 streams according to the specification if (asf->nb_streams >= ASF_MAX_STREAMS) return AVERROR_INVALIDDATA; size = avio_rl64(pb); ff_get_guid(pb, &stream_type); if (!ff_guidcmp(&stream_type, &ff_asf_audio_stream)) type = AVMEDIA_TYPE_AUDIO; else if (!ff_guidcmp(&stream_type, &ff_asf_video_stream)) type = AVMEDIA_TYPE_VIDEO; else if (!ff_guidcmp(&stream_type, &ff_asf_jfif_media)) type = AVMEDIA_TYPE_VIDEO; else if (!ff_guidcmp(&stream_type, &ff_asf_command_stream)) type = AVMEDIA_TYPE_DATA; else if (!ff_guidcmp(&stream_type, &ff_asf_ext_stream_embed_stream_header)) type = AVMEDIA_TYPE_UNKNOWN; else return AVERROR_INVALIDDATA; ff_get_guid(pb, &stream_type); // error correction type avio_skip(pb, 8); // skip the time offset ts_data_len = avio_rl32(pb); err_data_len = avio_rl32(pb); flags = avio_rl16(pb); // bit 15 - Encrypted Content stream_index = flags & ASF_STREAM_NUM; for (i = 0; i < asf->nb_streams; i++) if (stream_index == asf->asf_st[i]->stream_index) { av_log(s, AV_LOG_WARNING, "Duplicate stream found, this stream will be ignored.\n"); align_position(pb, asf->offset, size); return 0; } st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 32, 1, 1000); // pts should be dword, in milliseconds st->codecpar->codec_type = type; asf->asf_st[asf->nb_streams] = av_mallocz(sizeof(*asf_st)); if (!asf->asf_st[asf->nb_streams]) return AVERROR(ENOMEM); asf_st = asf->asf_st[asf->nb_streams]; asf->nb_streams++; asf_st->stream_index = stream_index; asf_st->index = st->index; asf_st->indexed = 0; st->id = flags & ASF_STREAM_NUM; av_init_packet(&asf_st->pkt.avpkt); asf_st->pkt.data_size = 0; avio_skip(pb, 4); // skip reserved field switch (type) { case AVMEDIA_TYPE_AUDIO: asf_st->type = AVMEDIA_TYPE_AUDIO; if ((ret = ff_get_wav_header(s, pb, st->codecpar, 
ts_data_len, 0)) < 0) return ret; break; case AVMEDIA_TYPE_VIDEO: asf_st->type = AVMEDIA_TYPE_VIDEO; if ((ret = parse_video_info(pb, st)) < 0) return ret; break; default: avio_skip(pb, ts_data_len); break; } if (err_data_len) { if (type == AVMEDIA_TYPE_AUDIO) { uint8_t span = avio_r8(pb); if (span > 1) { asf_st->span = span; asf_st->virtual_pkt_len = avio_rl16(pb); asf_st->virtual_chunk_len = avio_rl16(pb); if (!asf_st->virtual_chunk_len || !asf_st->virtual_pkt_len) return AVERROR_INVALIDDATA; avio_skip(pb, err_data_len - 5); } else avio_skip(pb, err_data_len - 1); } else avio_skip(pb, err_data_len); } align_position(pb, asf->offset, size); return 0; } static void set_language(AVFormatContext *s, const char *rfc1766, AVDictionary **met) { // language abbr should contain at least 2 chars if (rfc1766 && strlen(rfc1766) > 1) { const char primary_tag[3] = { rfc1766[0], rfc1766[1], '\0' }; // ignore country code if any const char *iso6392 = ff_convert_lang_to(primary_tag, AV_LANG_ISO639_2_BIBL); if (iso6392) if (av_dict_set(met, "language", iso6392, 0) < 0) av_log(s, AV_LOG_WARNING, "av_dict_set failed.\n"); } } static int asf_read_ext_stream_properties(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = NULL; ff_asf_guid guid; uint16_t nb_st_name, nb_pay_exts, st_num, lang_idx; int i, ret; uint32_t bitrate; uint64_t start_time, end_time, time_per_frame; uint64_t size = avio_rl64(pb); start_time = avio_rl64(pb); end_time = avio_rl64(pb); bitrate = avio_rl32(pb); avio_skip(pb, 28); // skip some unused values st_num = avio_rl16(pb); st_num &= ASF_STREAM_NUM; lang_idx = avio_rl16(pb); // Stream Language ID Index for (i = 0; i < asf->nb_streams; i++) { if (st_num == asf->asf_st[i]->stream_index) { st = s->streams[asf->asf_st[i]->index]; asf->asf_st[i]->lang_idx = lang_idx; break; } } time_per_frame = avio_rl64(pb); // average time per frame if (st) { st->start_time = start_time; st->duration = end_time - start_time; st->codecpar->bit_rate = bitrate; st->avg_frame_rate.num = 10000000; st->avg_frame_rate.den = time_per_frame; } nb_st_name = avio_rl16(pb); nb_pay_exts = avio_rl16(pb); for (i = 0; i < nb_st_name; i++) { uint16_t len; avio_rl16(pb); // Language ID Index len = avio_rl16(pb); avio_skip(pb, len); } for (i = 0; i < nb_pay_exts; i++) { uint32_t len; avio_skip(pb, 16); // Extension System ID avio_skip(pb, 2); // Extension Data Size len = avio_rl32(pb); avio_skip(pb, len); } if ((ret = ff_get_guid(pb, &guid)) < 0) { align_position(pb, asf->offset, size); return 0; } g = find_guid(guid); if (g && !(strcmp(g->name, "Stream Properties"))) { if ((ret = g->read_object(s, g)) < 0) return ret; } align_position(pb, asf->offset, size); return 0; } static int asf_read_language_list(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int i, ret; uint64_t size = avio_rl64(pb); uint16_t nb_langs = avio_rl16(pb); if (nb_langs < ASF_MAX_STREAMS) { for (i = 0; i < nb_langs; i++) { size_t len; len = avio_r8(pb); if (!len) len = 6; if ((ret = get_asf_string(pb, len, asf->asf_sd[i].langs, sizeof(asf->asf_sd[i].langs))) < 0) { return ret; } } } align_position(pb, asf->offset, size); return 0; } // returns data object offset when reading this object for the first time static int asf_read_data(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = asf->data_size = avio_rl64(pb); int i; if (!asf->data_reached) { asf->data_reached = 1; 
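/* first pass over the Data Object: remember where it starts, since packet parsing and index-driven seeks below are computed relative to this offset */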
asf->data_offset = asf->offset; } for (i = 0; i < asf->nb_streams; i++) { if (!(asf->b_flags & ASF_FLAG_BROADCAST)) s->streams[i]->duration = asf->duration; } asf->nb_mult_left = 0; asf->sub_left = 0; asf->state = PARSE_PACKET_HEADER; asf->return_subpayload = 0; asf->packet_size_internal = 0; avio_skip(pb, 16); // skip File ID size = avio_rl64(pb); // Total Data Packets if (size != asf->nb_packets) av_log(s, AV_LOG_WARNING, "Number of Packets from File Properties Object is not equal to Total" "Datapackets value! num of packets %"PRIu64" total num %"PRIu64".\n", size, asf->nb_packets); avio_skip(pb, 2); // skip reserved field asf->first_packet_offset = avio_tell(pb); if ((pb->seekable & AVIO_SEEKABLE_NORMAL) && !(asf->b_flags & ASF_FLAG_BROADCAST)) align_position(pb, asf->offset, asf->data_size); return 0; } static int asf_read_simple_index(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st = NULL; uint64_t interval; // index entry time interval in 100 ns units, usually it's 1s uint32_t pkt_num, nb_entries; int32_t prev_pkt_num = -1; int i; int64_t offset; uint64_t size = avio_rl64(pb); // simple index objects should be ordered by stream number, this loop tries to find // the first not indexed video stream for (i = 0; i < asf->nb_streams; i++) { if ((asf->asf_st[i]->type == AVMEDIA_TYPE_VIDEO) && !asf->asf_st[i]->indexed) { asf->asf_st[i]->indexed = 1; st = s->streams[asf->asf_st[i]->index]; break; } } if (!st) { avio_skip(pb, size - 24); // if there's no video stream, skip index object return 0; } avio_skip(pb, 16); // skip File ID interval = avio_rl64(pb); avio_skip(pb, 4); nb_entries = avio_rl32(pb); for (i = 0; i < nb_entries; i++) { pkt_num = avio_rl32(pb); offset = avio_skip(pb, 2); if (offset < 0) { av_log(s, AV_LOG_ERROR, "Skipping failed in asf_read_simple_index.\n"); return offset; } if (prev_pkt_num != pkt_num) { av_add_index_entry(st, asf->first_packet_offset + asf->packet_size * pkt_num, av_rescale(interval, i, 10000), asf->packet_size, 0, AVINDEX_KEYFRAME); prev_pkt_num = pkt_num; } } asf->is_simple_index = 1; align_position(pb, asf->offset, size); return 0; } static const GUIDParseTable gdef[] = { { "Data", { 0x75, 0xB2, 0x26, 0x36, 0x66, 0x8E, 0x11, 0xCF, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C }, asf_read_data, 1 }, { "Simple Index", { 0x33, 0x00, 0x08, 0x90, 0xE5, 0xB1, 0x11, 0xCF, 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB }, asf_read_simple_index, 1 }, { "Content Description", { 0x75, 0xB2, 0x26, 0x33, 0x66 ,0x8E, 0x11, 0xCF, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C }, asf_read_content_desc, 1 }, { "Extended Content Description", { 0xD2, 0xD0, 0xA4, 0x40, 0xE3, 0x07, 0x11, 0xD2, 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5e, 0xA8, 0x50 }, asf_read_ext_content, 1 }, { "Stream Bitrate Properties", { 0x7B, 0xF8, 0x75, 0xCE, 0x46, 0x8D, 0x11, 0xD1, 0x8D, 0x82, 0x00, 0x60, 0x97, 0xC9, 0xA2, 0xB2 }, asf_read_unknown, 1 }, { "File Properties", { 0x8C, 0xAB, 0xDC, 0xA1, 0xA9, 0x47, 0x11, 0xCF, 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 }, asf_read_properties, 1 }, { "Header Extension", { 0x5F, 0xBF, 0x03, 0xB5, 0xA9, 0x2E, 0x11, 0xCF, 0x8E, 0xE3, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 }, asf_read_unknown, 0 }, { "Stream Properties", { 0xB7, 0xDC, 0x07, 0x91, 0xA9, 0xB7, 0x11, 0xCF, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 }, asf_read_stream_properties, 1 }, { "Codec List", { 0x86, 0xD1, 0x52, 0x40, 0x31, 0x1D, 0x11, 0xD0, 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 }, asf_read_unknown, 1 }, { 
"Marker", { 0xF4, 0x87, 0xCD, 0x01, 0xA9, 0x51, 0x11, 0xCF, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 }, asf_read_marker, 1 }, { "Script Command", { 0x1E, 0xFB, 0x1A, 0x30, 0x0B, 0x62, 0x11, 0xD0, 0xA3, 0x9B, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 }, asf_read_unknown, 1 }, { "Language List", { 0x7C, 0x43, 0x46, 0xa9, 0xef, 0xe0, 0x4B, 0xFC, 0xB2, 0x29, 0x39, 0x3e, 0xde, 0x41, 0x5c, 0x85 }, asf_read_language_list, 1}, { "Padding", { 0x18, 0x06, 0xD4, 0x74, 0xCA, 0xDF, 0x45, 0x09, 0xA4, 0xBA, 0x9A, 0xAB, 0xCB, 0x96, 0xAA, 0xE8 }, asf_read_unknown, 1 }, { "DRMv1 Header", { 0x22, 0x11, 0xB3, 0xFB, 0xBD, 0x23, 0x11, 0xD2, 0xB4, 0xB7, 0x00, 0xA0, 0xC9, 0x55, 0xFC, 0x6E }, asf_read_unknown, 1 }, { "DRMv2 Header", { 0x29, 0x8A, 0xE6, 0x14, 0x26, 0x22, 0x4C, 0x17, 0xB9, 0x35, 0xDA, 0xE0, 0x7E, 0xE9, 0x28, 0x9c }, asf_read_unknown, 1 }, { "Index", { 0xD6, 0xE2, 0x29, 0xD3, 0x35, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Media Object Index", { 0xFE, 0xB1, 0x03, 0xF8, 0x12, 0xAD, 0x4C, 0x64, 0x84, 0x0F, 0x2A, 0x1D, 0x2F, 0x7A, 0xD4, 0x8C }, asf_read_unknown, 1 }, { "Timecode Index", { 0x3C, 0xB7, 0x3F, 0xD0, 0x0C, 0x4A, 0x48, 0x03, 0x95, 0x3D, 0xED, 0xF7, 0xB6, 0x22, 0x8F, 0x0C }, asf_read_unknown, 0 }, { "Bitrate_Mutual_Exclusion", { 0xD6, 0xE2, 0x29, 0xDC, 0x35, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Error Correction", { 0x75, 0xB2, 0x26, 0x35, 0x66, 0x8E, 0x11, 0xCF, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C }, asf_read_unknown, 1 }, { "Content Branding", { 0x22, 0x11, 0xB3, 0xFA, 0xBD, 0x23, 0x11, 0xD2, 0xB4, 0xB7, 0x00, 0xA0, 0xC9, 0x55, 0xFC, 0x6E }, asf_read_unknown, 1 }, { "Content Encryption", { 0x22, 0x11, 0xB3, 0xFB, 0xBD, 0x23, 0x11, 0xD2, 0xB4, 0xB7, 0x00, 0xA0, 0xC9, 0x55, 0xFC, 0x6E }, asf_read_unknown, 1 }, { "Extended Content Encryption", { 0x29, 0x8A, 0xE6, 0x14, 0x26, 0x22, 0x4C, 0x17, 0xB9, 0x35, 0xDA, 0xE0, 0x7E, 0xE9, 0x28, 0x9C }, asf_read_unknown, 1 }, { "Digital Signature", { 0x22, 0x11, 0xB3, 0xFC, 0xBD, 0x23, 0x11, 0xD2, 0xB4, 0xB7, 0x00, 0xA0, 0xC9, 0x55, 0xFC, 0x6E }, asf_read_unknown, 1 }, { "Extended Stream Properties", { 0x14, 0xE6, 0xA5, 0xCB, 0xC6, 0x72, 0x43, 0x32, 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A }, asf_read_ext_stream_properties, 1 }, { "Advanced Mutual Exclusion", { 0xA0, 0x86, 0x49, 0xCF, 0x47, 0x75, 0x46, 0x70, 0x8A, 0x16, 0x6E, 0x35, 0x35, 0x75, 0x66, 0xCD }, asf_read_unknown, 1 }, { "Group Mutual Exclusion", { 0xD1, 0x46, 0x5A, 0x40, 0x5A, 0x79, 0x43, 0x38, 0xB7, 0x1B, 0xE3, 0x6B, 0x8F, 0xD6, 0xC2, 0x49 }, asf_read_unknown, 1}, { "Stream Prioritization", { 0xD4, 0xFE, 0xD1, 0x5B, 0x88, 0xD3, 0x45, 0x4F, 0x81, 0xF0, 0xED, 0x5C, 0x45, 0x99, 0x9E, 0x24 }, asf_read_unknown, 1 }, { "Bandwidth Sharing Object", { 0xA6, 0x96, 0x09, 0xE6, 0x51, 0x7B, 0x11, 0xD2, 0xB6, 0xAF, 0x00, 0xC0, 0x4F, 0xD9, 0x08, 0xE9 }, asf_read_unknown, 1 }, { "Metadata", { 0xC5, 0xF8, 0xCB, 0xEA, 0x5B, 0xAF, 0x48, 0x77, 0x84, 0x67, 0xAA, 0x8C, 0x44, 0xFA, 0x4C, 0xCA }, asf_read_metadata_obj, 1 }, { "Metadata Library", { 0x44, 0x23, 0x1C, 0x94, 0x94, 0x98, 0x49, 0xD1, 0xA1, 0x41, 0x1D, 0x13, 0x4E, 0x45, 0x70, 0x54 }, asf_read_metadata_obj, 1 }, { "Audio Spread", { 0xBF, 0xC3, 0xCD, 0x50, 0x61, 0x8F, 0x11, 0xCF, 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20 }, asf_read_unknown, 1 }, { "Index Parameters", { 0xD6, 0xE2, 0x29, 0xDF, 0x35, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Content Encryption System Windows Media 
DRM Network Devices", { 0x7A, 0x07, 0x9B, 0xB6, 0xDA, 0XA4, 0x4e, 0x12, 0xA5, 0xCA, 0x91, 0xD3, 0x8D, 0xC1, 0x1A, 0x8D }, asf_read_unknown, 1 }, { "Mutex Language", { 0xD6, 0xE2, 0x2A, 0x00, 0x25, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Mutex Bitrate", { 0xD6, 0xE2, 0x2A, 0x01, 0x25, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Mutex Unknown", { 0xD6, 0xE2, 0x2A, 0x02, 0x25, 0xDA, 0x11, 0xD1, 0x90, 0x34, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xBE }, asf_read_unknown, 1 }, { "Bandwidth Sharing Exclusive", { 0xAF, 0x60, 0x60, 0xAA, 0x51, 0x97, 0x11, 0xD2, 0xB6, 0xAF, 0x00, 0xC0, 0x4F, 0xD9, 0x08, 0xE9 }, asf_read_unknown, 1 }, { "Bandwidth Sharing Partial", { 0xAF, 0x60, 0x60, 0xAB, 0x51, 0x97, 0x11, 0xD2, 0xB6, 0xAF, 0x00, 0xC0, 0x4F, 0xD9, 0x08, 0xE9 }, asf_read_unknown, 1 }, { "Payload Extension System Timecode", { 0x39, 0x95, 0x95, 0xEC, 0x86, 0x67, 0x4E, 0x2D, 0x8F, 0xDB, 0x98, 0x81, 0x4C, 0xE7, 0x6C, 0x1E }, asf_read_unknown, 1 }, { "Payload Extension System File Name", { 0xE1, 0x65, 0xEC, 0x0E, 0x19, 0xED, 0x45, 0xD7, 0xB4, 0xA7, 0x25, 0xCB, 0xD1, 0xE2, 0x8E, 0x9B }, asf_read_unknown, 1 }, { "Payload Extension System Content Type", { 0xD5, 0x90, 0xDC, 0x20, 0x07, 0xBC, 0x43, 0x6C, 0x9C, 0xF7, 0xF3, 0xBB, 0xFB, 0xF1, 0xA4, 0xDC }, asf_read_unknown, 1 }, { "Payload Extension System Pixel Aspect Ratio", { 0x1, 0x1E, 0xE5, 0x54, 0xF9, 0xEA, 0x4B, 0xC8, 0x82, 0x1A, 0x37, 0x6B, 0x74, 0xE4, 0xC4, 0xB8 }, asf_read_unknown, 1 }, { "Payload Extension System Sample Duration", { 0xC6, 0xBD, 0x94, 0x50, 0x86, 0x7F, 0x49, 0x07, 0x83, 0xA3, 0xC7, 0x79, 0x21, 0xB7, 0x33, 0xAD }, asf_read_unknown, 1 }, { "Payload Extension System Encryption Sample ID", { 0x66, 0x98, 0xB8, 0x4E, 0x0A, 0xFA, 0x43, 0x30, 0xAE, 0xB2, 0x1C, 0x0A, 0x98, 0xD7, 0xA4, 0x4D }, asf_read_unknown, 1 }, { "Payload Extension System Degradable JPEG", { 0x00, 0xE1, 0xAF, 0x06, 0x7B, 0xEC, 0x11, 0xD1, 0xA5, 0x82, 0x00, 0xC0, 0x4F, 0xC2, 0x9C, 0xFB }, asf_read_unknown, 1 }, }; #define READ_LEN(flag, name, len) \ do { \ if ((flag) == name ## IS_BYTE) \ len = avio_r8(pb); \ else if ((flag) == name ## IS_WORD) \ len = avio_rl16(pb); \ else if ((flag) == name ## IS_DWORD) \ len = avio_rl32(pb); \ else \ len = 0; \ } while(0) static int asf_read_subpayload(AVFormatContext *s, AVPacket *pkt, int is_header) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint8_t sub_len; int ret, i; if (is_header) { asf->dts_delta = avio_r8(pb); if (asf->nb_mult_left) { asf->mult_sub_len = avio_rl16(pb); // total } asf->sub_header_offset = avio_tell(pb); asf->nb_sub = 0; asf->sub_left = 1; } sub_len = avio_r8(pb); if ((ret = av_get_packet(pb, pkt, sub_len)) < 0) // each subpayload is entire frame return ret; for (i = 0; i < asf->nb_streams; i++) { if (asf->stream_index == asf->asf_st[i]->stream_index) { pkt->stream_index = asf->asf_st[i]->index; break; } } asf->return_subpayload = 1; if (!sub_len) asf->return_subpayload = 0; if (sub_len) asf->nb_sub++; pkt->dts = asf->sub_dts + (asf->nb_sub - 1) * asf->dts_delta - asf->preroll; if (asf->nb_mult_left && (avio_tell(pb) >= (asf->sub_header_offset + asf->mult_sub_len))) { asf->sub_left = 0; asf->nb_mult_left--; } if (avio_tell(pb) >= asf->packet_offset + asf->packet_size - asf->pad_len) { asf->sub_left = 0; if (!asf->nb_mult_left) { avio_skip(pb, asf->pad_len); if (avio_tell(pb) != asf->packet_offset + asf->packet_size) { if (!asf->packet_size) return AVERROR_INVALIDDATA; av_log(s, AV_LOG_WARNING, "Position 
%"PRId64" wrong, should be %"PRId64"\n", avio_tell(pb), asf->packet_offset + asf->packet_size); avio_seek(pb, asf->packet_offset + asf->packet_size, SEEK_SET); } } } return 0; } static void reset_packet(ASFPacket *asf_pkt) { asf_pkt->size_left = 0; asf_pkt->data_size = 0; asf_pkt->duration = 0; asf_pkt->flags = 0; asf_pkt->dts = 0; asf_pkt->duration = 0; av_packet_unref(&asf_pkt->avpkt); av_init_packet(&asf_pkt->avpkt); } static int asf_read_replicated_data(AVFormatContext *s, ASFPacket *asf_pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int ret, data_size; if (!asf_pkt->data_size) { data_size = avio_rl32(pb); // read media object size if (data_size <= 0) return AVERROR_INVALIDDATA; if ((ret = av_new_packet(&asf_pkt->avpkt, data_size)) < 0) return ret; asf_pkt->data_size = asf_pkt->size_left = data_size; } else avio_skip(pb, 4); // reading of media object size is already done asf_pkt->dts = avio_rl32(pb); // read presentation time if (asf->rep_data_len && (asf->rep_data_len >= 8)) avio_skip(pb, asf->rep_data_len - 8); // skip replicated data return 0; } static int asf_read_multiple_payload(AVFormatContext *s, AVPacket *pkt, ASFPacket *asf_pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint16_t pay_len; unsigned char *p; int ret; int skip = 0; // if replicated length is 1, subpayloads are present if (asf->rep_data_len == 1) { asf->sub_left = 1; asf->state = READ_MULTI_SUB; pkt->flags = asf_pkt->flags; if ((ret = asf_read_subpayload(s, pkt, 1)) < 0) return ret; } else { if (asf->rep_data_len) if ((ret = asf_read_replicated_data(s, asf_pkt)) < 0) return ret; pay_len = avio_rl16(pb); // payload length should be WORD if (pay_len > asf->packet_size) { av_log(s, AV_LOG_ERROR, "Error: invalid data packet size, pay_len %"PRIu16", " "asf->packet_size %"PRIu32", offset %"PRId64".\n", pay_len, asf->packet_size, avio_tell(pb)); return AVERROR_INVALIDDATA; } p = asf_pkt->avpkt.data + asf_pkt->data_size - asf_pkt->size_left; if (pay_len > asf_pkt->size_left) { av_log(s, AV_LOG_ERROR, "Error: invalid buffer size, pay_len %d, data size left %d.\n", pay_len, asf_pkt->size_left); skip = pay_len - asf_pkt->size_left; pay_len = asf_pkt->size_left; } if (asf_pkt->size_left <= 0) return AVERROR_INVALIDDATA; if ((ret = avio_read(pb, p, pay_len)) < 0) return ret; if (s->key && s->keylen == 20) ff_asfcrypt_dec(s->key, p, ret); avio_skip(pb, skip); asf_pkt->size_left -= pay_len; asf->nb_mult_left--; } return 0; } static int asf_read_single_payload(AVFormatContext *s, ASFPacket *asf_pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int64_t offset; uint64_t size; unsigned char *p; int ret, data_size; if (!asf_pkt->data_size) { data_size = avio_rl32(pb); // read media object size if (data_size <= 0) return AVERROR_EOF; if ((ret = av_new_packet(&asf_pkt->avpkt, data_size)) < 0) return ret; asf_pkt->data_size = asf_pkt->size_left = data_size; } else avio_skip(pb, 4); // skip media object size asf_pkt->dts = avio_rl32(pb); // read presentation time if (asf->rep_data_len >= 8) avio_skip(pb, asf->rep_data_len - 8); // skip replicated data offset = avio_tell(pb); // size of the payload - size of the packet without header and padding if (asf->packet_size_internal) size = asf->packet_size_internal - offset + asf->packet_offset - asf->pad_len; else size = asf->packet_size - offset + asf->packet_offset - asf->pad_len; if (size > asf->packet_size) { av_log(s, AV_LOG_ERROR, "Error: invalid data packet size, offset %"PRId64".\n", avio_tell(pb)); return AVERROR_INVALIDDATA; } p = 
asf_pkt->avpkt.data + asf_pkt->data_size - asf_pkt->size_left; if (size > asf_pkt->size_left || asf_pkt->size_left <= 0) return AVERROR_INVALIDDATA; if (asf_pkt->size_left > size) asf_pkt->size_left -= size; else asf_pkt->size_left = 0; if ((ret = avio_read(pb, p, size)) < 0) return ret; if (s->key && s->keylen == 20) ff_asfcrypt_dec(s->key, p, ret); if (asf->packet_size_internal) avio_skip(pb, asf->packet_size - asf->packet_size_internal); avio_skip(pb, asf->pad_len); // skip padding return 0; } static int asf_read_payload(AVFormatContext *s, AVPacket *pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int ret, i; ASFPacket *asf_pkt = NULL; if (!asf->sub_left) { uint32_t off_len, media_len; uint8_t stream_num; stream_num = avio_r8(pb); asf->stream_index = stream_num & ASF_STREAM_NUM; for (i = 0; i < asf->nb_streams; i++) { if (asf->stream_index == asf->asf_st[i]->stream_index) { asf_pkt = &asf->asf_st[i]->pkt; asf_pkt->stream_index = asf->asf_st[i]->index; break; } } if (!asf_pkt) { if (asf->packet_offset + asf->packet_size <= asf->data_offset + asf->data_size) { if (!asf->packet_size) { av_log(s, AV_LOG_ERROR, "Invalid packet size 0.\n"); return AVERROR_INVALIDDATA; } avio_seek(pb, asf->packet_offset + asf->packet_size, SEEK_SET); av_log(s, AV_LOG_WARNING, "Skipping the stream with the invalid stream index %d.\n", asf->stream_index); return AVERROR(EAGAIN); } else return AVERROR_INVALIDDATA; } if (stream_num >> 7) asf_pkt->flags |= AV_PKT_FLAG_KEY; READ_LEN(asf->prop_flags & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE, ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_, media_len); READ_LEN(asf->prop_flags & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE, ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_, off_len); READ_LEN(asf->prop_flags & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE, ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_, asf->rep_data_len); if (asf_pkt->size_left && (asf_pkt->frame_num != media_len)) { av_log(s, AV_LOG_WARNING, "Unfinished frame will be ignored\n"); reset_packet(asf_pkt); } asf_pkt->frame_num = media_len; asf->sub_dts = off_len; if (asf->nb_mult_left) { if ((ret = asf_read_multiple_payload(s, pkt, asf_pkt)) < 0) return ret; } else if (asf->rep_data_len == 1) { asf->sub_left = 1; asf->state = READ_SINGLE; pkt->flags = asf_pkt->flags; if ((ret = asf_read_subpayload(s, pkt, 1)) < 0) return ret; } else { if ((ret = asf_read_single_payload(s, asf_pkt)) < 0) return ret; } } else { for (i = 0; i <= asf->nb_streams; i++) { if (asf->stream_index == asf->asf_st[i]->stream_index) { asf_pkt = &asf->asf_st[i]->pkt; break; } } if (!asf_pkt) return AVERROR_INVALIDDATA; pkt->flags = asf_pkt->flags; pkt->dts = asf_pkt->dts; pkt->stream_index = asf->asf_st[i]->index; if ((ret = asf_read_subpayload(s, pkt, 0)) < 0) // read subpayload without its header return ret; } return 0; } static int asf_read_packet_header(AVFormatContext *s) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size; uint32_t av_unused seq; unsigned char error_flags, len_flags, pay_flags; asf->packet_offset = avio_tell(pb); error_flags = avio_r8(pb); // read Error Correction Flags if (error_flags & ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT) { if (!(error_flags & ASF_ERROR_CORRECTION_LENGTH_TYPE)) { size = error_flags & ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; avio_skip(pb, size); } len_flags = avio_r8(pb); } else len_flags = error_flags; asf->prop_flags = avio_r8(pb); READ_LEN(len_flags & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE, ASF_PPI_FLAG_PACKET_LENGTH_FIELD_, 
asf->packet_size_internal); READ_LEN(len_flags & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE, ASF_PPI_FLAG_SEQUENCE_FIELD_, seq); READ_LEN(len_flags & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE, ASF_PPI_FLAG_PADDING_LENGTH_FIELD_, asf->pad_len ); asf->send_time = avio_rl32(pb); // send time avio_skip(pb, 2); // skip duration if (len_flags & ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT) { // Multiple Payloads present pay_flags = avio_r8(pb); asf->nb_mult_left = (pay_flags & ASF_NUM_OF_PAYLOADS); } return 0; } static int asf_deinterleave(AVFormatContext *s, ASFPacket *asf_pkt, int st_num) { ASFContext *asf = s->priv_data; ASFStream *asf_st = asf->asf_st[st_num]; unsigned char *p = asf_pkt->avpkt.data; uint16_t pkt_len = asf->asf_st[st_num]->virtual_pkt_len; uint16_t chunk_len = asf->asf_st[st_num]->virtual_chunk_len; int nchunks = pkt_len / chunk_len; AVPacket pkt; int pos = 0, j, l, ret; if ((ret = av_new_packet(&pkt, asf_pkt->data_size)) < 0) return ret; while (asf_pkt->data_size >= asf_st->span * pkt_len + pos) { if (pos >= asf_pkt->data_size) { break; } for (l = 0; l < pkt_len; l++) { if (pos >= asf_pkt->data_size) { break; } for (j = 0; j < asf_st->span; j++) { if ((pos + chunk_len) >= asf_pkt->data_size) break; memcpy(pkt.data + pos, p + (j * nchunks + l) * chunk_len, chunk_len); pos += chunk_len; } } p += asf_st->span * pkt_len; if (p > asf_pkt->avpkt.data + asf_pkt->data_size) break; } av_packet_unref(&asf_pkt->avpkt); asf_pkt->avpkt = pkt; return 0; } static int asf_read_packet(AVFormatContext *s, AVPacket *pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int ret, i; if ((avio_tell(pb) >= asf->data_offset + asf->data_size) && !(asf->b_flags & ASF_FLAG_BROADCAST)) return AVERROR_EOF; while (!pb->eof_reached) { if (asf->state == PARSE_PACKET_HEADER) { asf_read_packet_header(s); if (pb->eof_reached) break; if (!asf->nb_mult_left) asf->state = READ_SINGLE; else asf->state = READ_MULTI; } ret = asf_read_payload(s, pkt); if (ret == AVERROR(EAGAIN)) { asf->state = PARSE_PACKET_HEADER; continue; } else if (ret < 0) return ret; switch (asf->state) { case READ_SINGLE: if (!asf->sub_left) asf->state = PARSE_PACKET_HEADER; break; case READ_MULTI_SUB: if (!asf->sub_left && !asf->nb_mult_left) { asf->state = PARSE_PACKET_HEADER; if (!asf->return_subpayload && (avio_tell(pb) <= asf->packet_offset + asf->packet_size - asf->pad_len)) avio_skip(pb, asf->pad_len); // skip padding if (asf->packet_offset + asf->packet_size > avio_tell(pb)) avio_seek(pb, asf->packet_offset + asf->packet_size, SEEK_SET); } else if (!asf->sub_left) asf->state = READ_MULTI; break; case READ_MULTI: if (!asf->nb_mult_left) { asf->state = PARSE_PACKET_HEADER; if (!asf->return_subpayload && (avio_tell(pb) <= asf->packet_offset + asf->packet_size - asf->pad_len)) avio_skip(pb, asf->pad_len); // skip padding if (asf->packet_offset + asf->packet_size > avio_tell(pb)) avio_seek(pb, asf->packet_offset + asf->packet_size, SEEK_SET); } break; } if (asf->return_subpayload) { asf->return_subpayload = 0; return 0; } for (i = 0; i < asf->nb_streams; i++) { ASFPacket *asf_pkt = &asf->asf_st[i]->pkt; if (asf_pkt && !asf_pkt->size_left && asf_pkt->data_size) { if (asf->asf_st[i]->span > 1 && asf->asf_st[i]->type == AVMEDIA_TYPE_AUDIO) if ((ret = asf_deinterleave(s, asf_pkt, i)) < 0) return ret; av_packet_move_ref(pkt, &asf_pkt->avpkt); pkt->stream_index = asf->asf_st[i]->index; pkt->flags = asf_pkt->flags; pkt->dts = asf_pkt->dts - asf->preroll; asf_pkt->data_size = 0; asf_pkt->frame_num = 0; return 0; } } } if (pb->eof_reached) return 
AVERROR_EOF; return 0; } static int asf_read_close(AVFormatContext *s) { ASFContext *asf = s->priv_data; int i; for (i = 0; i < ASF_MAX_STREAMS; i++) { av_dict_free(&asf->asf_sd[i].asf_met); if (i < asf->nb_streams) { av_packet_unref(&asf->asf_st[i]->pkt.avpkt); av_freep(&asf->asf_st[i]); } } asf->nb_streams = 0; return 0; } static void reset_packet_state(AVFormatContext *s) { ASFContext *asf = s->priv_data; int i; asf->state = PARSE_PACKET_HEADER; asf->offset = 0; asf->return_subpayload = 0; asf->sub_left = 0; asf->sub_header_offset = 0; asf->packet_offset = asf->first_packet_offset; asf->pad_len = 0; asf->rep_data_len = 0; asf->dts_delta = 0; asf->mult_sub_len = 0; asf->nb_mult_left = 0; asf->nb_sub = 0; asf->prop_flags = 0; asf->sub_dts = 0; for (i = 0; i < asf->nb_streams; i++) { ASFPacket *pkt = &asf->asf_st[i]->pkt; pkt->size_left = 0; pkt->data_size = 0; pkt->duration = 0; pkt->flags = 0; pkt->dts = 0; pkt->duration = 0; av_packet_unref(&pkt->avpkt); av_init_packet(&pkt->avpkt); } } /* * Find a timestamp for the requested position within the payload * where the pos (position) is the offset inside the Data Object. * When position is not on the packet boundary, asf_read_timestamp tries * to find the closest packet offset after this position. If this packet * is a key frame, this packet timestamp is read and an index entry is created * for the packet. If this packet belongs to the requested stream, * asf_read_timestamp upgrades pos to the packet beginning offset and * returns this packet's dts. So returned dts is the dts of the first key frame with * matching stream number after given position. */ static int64_t asf_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos, int64_t pos_limit) { ASFContext *asf = s->priv_data; int64_t pkt_pos = *pos, pkt_offset, dts = AV_NOPTS_VALUE, data_end; AVPacket pkt; int n; data_end = asf->data_offset + asf->data_size; n = (pkt_pos - asf->first_packet_offset + asf->packet_size - 1) / asf->packet_size; n = av_clip(n, 0, ((data_end - asf->first_packet_offset) / asf->packet_size - 1)); pkt_pos = asf->first_packet_offset + n * asf->packet_size; avio_seek(s->pb, pkt_pos, SEEK_SET); pkt_offset = pkt_pos; reset_packet_state(s); while (avio_tell(s->pb) < data_end) { int i, ret, st_found; av_init_packet(&pkt); pkt_offset = avio_tell(s->pb); if ((ret = asf_read_packet(s, &pkt)) < 0) { dts = AV_NOPTS_VALUE; return ret; } // ASFPacket may contain fragments of packets belonging to different streams, // pkt_offset is the offset of the first fragment within it. 
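// advance pkt_pos one packet at a time so it keeps naming the packet that contains the fragment just read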
if ((pkt_offset >= (pkt_pos + asf->packet_size))) pkt_pos += asf->packet_size; for (i = 0; i < asf->nb_streams; i++) { ASFStream *st = asf->asf_st[i]; st_found = 0; if (pkt.flags & AV_PKT_FLAG_KEY) { dts = pkt.dts; if (dts) { av_add_index_entry(s->streams[pkt.stream_index], pkt_pos, dts, pkt.size, 0, AVINDEX_KEYFRAME); if (stream_index == st->index) { st_found = 1; break; } } } } if (st_found) break; av_packet_unref(&pkt); } *pos = pkt_pos; av_packet_unref(&pkt); return dts; } static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { ASFContext *asf = s->priv_data; int idx, ret; if (s->streams[stream_index]->nb_index_entries && asf->is_simple_index) { idx = av_index_search_timestamp(s->streams[stream_index], timestamp, flags); if (idx < 0 || idx >= s->streams[stream_index]->nb_index_entries) return AVERROR_INVALIDDATA; avio_seek(s->pb, s->streams[stream_index]->index_entries[idx].pos, SEEK_SET); } else { if ((ret = ff_seek_frame_binary(s, stream_index, timestamp, flags)) < 0) return ret; } reset_packet_state(s); return 0; } static const GUIDParseTable *find_guid(ff_asf_guid guid) { int j, ret; const GUIDParseTable *g; swap_guid(guid); g = gdef; for (j = 0; j < FF_ARRAY_ELEMS(gdef); j++) { if (!(ret = memcmp(guid, g->guid, sizeof(g->guid)))) return g; g++; } return NULL; } static int detect_unknown_subobject(AVFormatContext *s, int64_t offset, int64_t size) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; const GUIDParseTable *g = NULL; ff_asf_guid guid; int ret; while (avio_tell(pb) <= offset + size) { if (avio_tell(pb) == asf->offset) break; asf->offset = avio_tell(pb); if ((ret = ff_get_guid(pb, &guid)) < 0) return ret; g = find_guid(guid); if (g) { if ((ret = g->read_object(s, g)) < 0) return ret; } else { GUIDParseTable g2; g2.name = "Unknown"; g2.is_subobject = 1; asf_read_unknown(s, &g2); } } return 0; } static int asf_read_header(AVFormatContext *s) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; const GUIDParseTable *g = NULL; ff_asf_guid guid; int i, ret; uint64_t size; asf->preroll = 0; asf->is_simple_index = 0; ff_get_guid(pb, &guid); if (ff_guidcmp(&guid, &ff_asf_header)) return AVERROR_INVALIDDATA; avio_skip(pb, 8); // skip header object size avio_skip(pb, 6); // skip number of header objects and 2 reserved bytes asf->data_reached = 0; /* 1 is here instead of pb->eof_reached because (when not streaming), Data are skipped * for the first time, * Index object is processed and got eof and then seeking back to the Data is performed. 
*/ while (1) { // for the cases when object size is invalid if (avio_tell(pb) == asf->offset) break; asf->offset = avio_tell(pb); if ((ret = ff_get_guid(pb, &guid)) < 0) { if (ret == AVERROR_EOF && asf->data_reached) break; else goto failed; } g = find_guid(guid); if (g) { asf->unknown_offset = asf->offset; asf->is_header = 1; if ((ret = g->read_object(s, g)) < 0) goto failed; } else { size = avio_rl64(pb); align_position(pb, asf->offset, size); } if (asf->data_reached && (!(pb->seekable & AVIO_SEEKABLE_NORMAL) || (asf->b_flags & ASF_FLAG_BROADCAST))) break; } if (!asf->data_reached) { av_log(s, AV_LOG_ERROR, "Data Object was not found.\n"); ret = AVERROR_INVALIDDATA; goto failed; } if (pb->seekable & AVIO_SEEKABLE_NORMAL) avio_seek(pb, asf->first_packet_offset, SEEK_SET); for (i = 0; i < asf->nb_streams; i++) { const char *rfc1766 = asf->asf_sd[asf->asf_st[i]->lang_idx].langs; AVStream *st = s->streams[asf->asf_st[i]->index]; set_language(s, rfc1766, &st->metadata); } for (i = 0; i < ASF_MAX_STREAMS; i++) { AVStream *st = NULL; st = find_stream(s, i); if (st) { av_dict_copy(&st->metadata, asf->asf_sd[i].asf_met, AV_DICT_IGNORE_SUFFIX); if (asf->asf_sd[i].aspect_ratio.num > 0 && asf->asf_sd[i].aspect_ratio.den > 0) { st->sample_aspect_ratio.num = asf->asf_sd[i].aspect_ratio.num; st->sample_aspect_ratio.den = asf->asf_sd[i].aspect_ratio.den; } } } return 0; failed: asf_read_close(s); return ret; } AVInputFormat ff_asf_o_demuxer = { .name = "asf_o", .long_name = NULL_IF_CONFIG_SMALL("ASF (Advanced / Active Streaming Format)"), .priv_data_size = sizeof(ASFContext), .read_probe = asf_probe, .read_header = asf_read_header, .read_packet = asf_read_packet, .read_close = asf_read_close, .read_timestamp = asf_read_timestamp, .read_seek = asf_read_seek, .flags = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH, };
./CrossVul/dataset_final_sorted/CWE-119/c/good_464_0
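A minimal sketch of how the demuxer in the entry above is exercised through libavformat's public API of the same era (the av_register_all() call dates the sketch to pre-4.0 FFmpeg, matching the codecpar usage in the entry). "sample.asf" is a placeholder path; "asf_o" is the short name registered by the AVInputFormat at the end of the entry, and passing it to avformat_open_input() bypasses format probing:

#include <inttypes.h>
#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    AVInputFormat *asf_o;

    av_register_all();                       /* pre-4.0 API: register all (de)muxers */
    asf_o = av_find_input_format("asf_o");   /* force the demuxer defined above */
    if (avformat_open_input(&fmt, "sample.asf", asf_o, NULL) < 0) /* runs asf_read_header() */
        return 1;
    if (avformat_find_stream_info(fmt, NULL) < 0)
        goto end;
    while (av_read_frame(fmt, &pkt) >= 0) {  /* each call lands in asf_read_packet() */
        printf("stream %d dts %"PRId64" size %d\n",
               pkt.stream_index, pkt.dts, pkt.size);
        av_packet_unref(&pkt);
    }
end:
    avformat_close_input(&fmt);              /* runs asf_read_close() */
    return 0;
}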
crossvul-cpp_data_bad_676_0
/* * Copyright (C) 2014 Daniel-Constantin Mierla (asipto.com) * * This file is part of kamailio, a free SIP server. * * Kamailio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version * * Kamailio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /*! \file * \brief TMX :: Pretran * * \ingroup tm * - Module: \ref tm */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include "../../core/dprint.h" #include "../../core/mem/shm_mem.h" #include "../../core/locking.h" #include "../../core/hashes.h" #include "../../core/config.h" #include "../../core/parser/parse_via.h" #include "../../core/parser/parse_from.h" #include "../../core/route.h" #include "../../core/trim.h" #include "../../core/pt.h" #include "tmx_pretran.h" typedef struct _pretran { unsigned int hid; unsigned int linked; str callid; str ftag; str cseqnum; str cseqmet; unsigned int cseqmetid; str vbranch; str dbuf; int pid; struct _pretran *next; struct _pretran *prev; } pretran_t; typedef struct pretran_slot { pretran_t *plist; gen_lock_t lock; } pretran_slot_t; static pretran_t *_tmx_proc_ptran = NULL; static pretran_slot_t *_tmx_ptran_table = NULL; static int _tmx_ptran_size = 0; /** * */ int tmx_init_pretran_table(void) { int n; int pn; pn = get_max_procs(); if(pn<=0) return -1; if(_tmx_ptran_table!=NULL) return -1; /* get the highest power of two less than number of processes */ n = -1; while (pn >> ++n > 0); n--; if(n<=1) n = 2; if(n>8) n = 8; _tmx_ptran_size = 1<<n; _tmx_ptran_table = (pretran_slot_t*)shm_malloc(_tmx_ptran_size*sizeof(pretran_slot_t)); if(_tmx_ptran_table == NULL) { LM_ERR("not enough shared memory\n"); return -1; } memset(_tmx_ptran_table, 0, _tmx_ptran_size*sizeof(pretran_slot_t)); for(n=0; n<_tmx_ptran_size; n++) { if(lock_init(&_tmx_ptran_table[n].lock)==NULL) { LM_ERR("cannot init the lock %d\n", n); n--; while(n>=0) { lock_destroy(&_tmx_ptran_table[n].lock); n--; } shm_free(_tmx_ptran_table); _tmx_ptran_table = 0; _tmx_ptran_size = 0; return -1; } } return 0; } /** * */ void tmx_pretran_link_safe(int slotid) { if(_tmx_proc_ptran==NULL) return; if(_tmx_ptran_table[slotid].plist==NULL) { _tmx_ptran_table[slotid].plist = _tmx_proc_ptran; _tmx_proc_ptran->linked = 1; return; } _tmx_proc_ptran->next = _tmx_ptran_table[slotid].plist; _tmx_ptran_table[slotid].plist->prev = _tmx_proc_ptran; _tmx_ptran_table[slotid].plist = _tmx_proc_ptran; _tmx_proc_ptran->linked = 1; return; } /** * */ void tmx_pretran_unlink_safe(int slotid) { if(_tmx_proc_ptran==NULL) return; if(_tmx_proc_ptran->linked == 0) return; if(_tmx_ptran_table[slotid].plist==NULL) { _tmx_proc_ptran->prev = _tmx_proc_ptran->next = NULL; _tmx_proc_ptran->linked = 0; return; } if(_tmx_proc_ptran->prev==NULL) { _tmx_ptran_table[slotid].plist = _tmx_proc_ptran->next; if(_tmx_ptran_table[slotid].plist!=NULL) _tmx_ptran_table[slotid].plist->prev = NULL; } else { _tmx_proc_ptran->prev->next = _tmx_proc_ptran->next; if(_tmx_proc_ptran->next) _tmx_proc_ptran->next->prev = _tmx_proc_ptran->prev; 
} _tmx_proc_ptran->prev = _tmx_proc_ptran->next = NULL; _tmx_proc_ptran->linked = 0; return; } /** * */ void tmx_pretran_unlink(void) { int slotid; if(_tmx_proc_ptran==NULL) return; slotid = _tmx_proc_ptran->hid & (_tmx_ptran_size-1); lock_get(&_tmx_ptran_table[slotid].lock); tmx_pretran_unlink_safe(slotid); lock_release(&_tmx_ptran_table[slotid].lock); } /** * return: * - -1: error * - 0: not found * - 1: found */ int tmx_check_pretran(sip_msg_t *msg) { unsigned int chid; unsigned int slotid; int dsize; struct via_param *vbr; str scallid; str scseqmet; str scseqnum; str sftag; str svbranch = {NULL, 0}; pretran_t *it; if(_tmx_ptran_table==NULL) { LM_ERR("pretran hash table not initialized yet\n"); return -1; } if(get_route_type()!=REQUEST_ROUTE) { LM_ERR("invalid usage - not in request route\n"); return -1; } if(msg->first_line.type!=SIP_REQUEST) { LM_ERR("invalid usage - not a sip request\n"); return -1; } if(parse_headers(msg, HDR_FROM_F|HDR_VIA1_F|HDR_CALLID_F|HDR_CSEQ_F, 0)<0) { LM_ERR("failed to parse required headers\n"); return -1; } if(msg->cseq==NULL || msg->cseq->parsed==NULL) { LM_ERR("failed to parse cseq headers\n"); return -1; } if(get_cseq(msg)->method_id==METHOD_ACK || get_cseq(msg)->method_id==METHOD_CANCEL) { LM_DBG("no pre-transaction management for ACK or CANCEL\n"); return -1; } if (msg->via1==0) { LM_ERR("failed to get Via header\n"); return -1; } if (parse_from_header(msg)<0 || get_from(msg)->tag_value.len==0) { LM_ERR("failed to get From header\n"); return -1; } if (msg->callid==NULL || msg->callid->body.s==NULL) { LM_ERR("failed to parse callid headers\n"); return -1; } vbr = msg->via1->branch; scallid = msg->callid->body; trim(&scallid); scseqmet = get_cseq(msg)->method; trim(&scseqmet); scseqnum = get_cseq(msg)->number; trim(&scseqnum); sftag = get_from(msg)->tag_value; trim(&sftag); chid = get_hash1_raw(msg->callid->body.s, msg->callid->body.len); slotid = chid & (_tmx_ptran_size-1); if(unlikely(_tmx_proc_ptran == NULL)) { _tmx_proc_ptran = (pretran_t*)shm_malloc(sizeof(pretran_t)); if(_tmx_proc_ptran == NULL) { LM_ERR("not enough memory for pretran structure\n"); return -1; } memset(_tmx_proc_ptran, 0, sizeof(pretran_t)); _tmx_proc_ptran->pid = my_pid(); } dsize = scallid.len + scseqnum.len + scseqmet.len + sftag.len + 4; if(likely(vbr!=NULL)) { svbranch = vbr->value; trim(&svbranch); dsize += svbranch.len; } if(dsize<256) dsize = 256; tmx_pretran_unlink(); if(dsize > _tmx_proc_ptran->dbuf.len) { if(_tmx_proc_ptran->dbuf.s) shm_free(_tmx_proc_ptran->dbuf.s); _tmx_proc_ptran->dbuf.s = (char*)shm_malloc(dsize); if(_tmx_proc_ptran->dbuf.s==NULL) { LM_ERR("not enough memory for pretran data\n"); return -1; } _tmx_proc_ptran->dbuf.len = dsize; } _tmx_proc_ptran->hid = chid; _tmx_proc_ptran->cseqmetid = (get_cseq(msg))->method_id; _tmx_proc_ptran->callid.s = _tmx_proc_ptran->dbuf.s; memcpy(_tmx_proc_ptran->callid.s, scallid.s, scallid.len); _tmx_proc_ptran->callid.len = scallid.len; _tmx_proc_ptran->callid.s[_tmx_proc_ptran->callid.len] = '\0'; _tmx_proc_ptran->ftag.s = _tmx_proc_ptran->callid.s + _tmx_proc_ptran->callid.len + 1; memcpy(_tmx_proc_ptran->ftag.s, sftag.s, sftag.len); _tmx_proc_ptran->ftag.len = sftag.len; _tmx_proc_ptran->ftag.s[_tmx_proc_ptran->ftag.len] = '\0'; _tmx_proc_ptran->cseqnum.s = _tmx_proc_ptran->ftag.s + _tmx_proc_ptran->ftag.len + 1; memcpy(_tmx_proc_ptran->cseqnum.s, scseqnum.s, scseqnum.len); _tmx_proc_ptran->cseqnum.len = scseqnum.len; _tmx_proc_ptran->cseqnum.s[_tmx_proc_ptran->cseqnum.len] = '\0'; _tmx_proc_ptran->cseqmet.s = 
_tmx_proc_ptran->cseqnum.s + _tmx_proc_ptran->cseqnum.len + 1; memcpy(_tmx_proc_ptran->cseqmet.s, scseqmet.s, scseqmet.len); _tmx_proc_ptran->cseqmet.len = scseqmet.len; _tmx_proc_ptran->cseqmet.s[_tmx_proc_ptran->cseqmet.len] = '\0'; if(likely(vbr!=NULL)) { _tmx_proc_ptran->vbranch.s = _tmx_proc_ptran->cseqmet.s + _tmx_proc_ptran->cseqmet.len + 1; memcpy(_tmx_proc_ptran->vbranch.s, svbranch.s, svbranch.len); _tmx_proc_ptran->vbranch.len = svbranch.len; _tmx_proc_ptran->vbranch.s[_tmx_proc_ptran->vbranch.len] = '\0'; } else { _tmx_proc_ptran->vbranch.s = NULL; _tmx_proc_ptran->vbranch.len = 0; } lock_get(&_tmx_ptran_table[slotid].lock); it = _tmx_ptran_table[slotid].plist; tmx_pretran_link_safe(slotid); for(; it!=NULL; it=it->next) { if(_tmx_proc_ptran->hid != it->hid || _tmx_proc_ptran->cseqmetid != it->cseqmetid || _tmx_proc_ptran->callid.len != it->callid.len || _tmx_proc_ptran->ftag.len != it->ftag.len || _tmx_proc_ptran->cseqmet.len != it->cseqmet.len || _tmx_proc_ptran->cseqnum.len != it->cseqnum.len) continue; if(_tmx_proc_ptran->vbranch.s != NULL && it->vbranch.s != NULL) { if(_tmx_proc_ptran->vbranch.len != it->vbranch.len) continue; /* shortcut - check last char in Via branch * - kamailio/ser adds there branch index => in case of paralel * forking by previous hop, catch it here quickly */ if(_tmx_proc_ptran->vbranch.s[it->vbranch.len-1] != it->vbranch.s[it->vbranch.len-1]) continue; if(memcmp(_tmx_proc_ptran->vbranch.s, it->vbranch.s, it->vbranch.len)!=0) continue; /* shall stop by matching magic cookie? * if (vbr && vbr->value.s && vbr->value.len > MCOOKIE_LEN * && memcmp(vbr->value.s, MCOOKIE, MCOOKIE_LEN)==0) { * LM_DBG("rfc3261 cookie found in Via branch\n"); * } */ } if(memcmp(_tmx_proc_ptran->callid.s, it->callid.s, it->callid.len)!=0 || memcmp(_tmx_proc_ptran->ftag.s, it->ftag.s, it->ftag.len)!=0 || memcmp(_tmx_proc_ptran->cseqnum.s, it->cseqnum.s, it->cseqnum.len)!=0) continue; if((it->cseqmetid==METHOD_OTHER || it->cseqmetid==METHOD_UNDEF) && memcmp(_tmx_proc_ptran->cseqmet.s, it->cseqmet.s, it->cseqmet.len)!=0) continue; LM_DBG("matched another pre-transaction by pid %d for [%.*s]\n", it->pid, it->callid.len, it->callid.s); lock_release(&_tmx_ptran_table[slotid].lock); return 1; } lock_release(&_tmx_ptran_table[slotid].lock); return 0; }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_676_0
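/*
 * A minimal standalone sketch (not part of either dataset file; the two
 * helper names below are hypothetical) of the buffer sizing in bad_676_0
 * above: tmx_check_pretran() reserves 4 extra bytes for the NUL terminators
 * of callid, ftag, cseqnum and cseqmet, but it also writes the Via branch
 * value plus a fifth NUL, so the store at vbranch.s[vbranch.len] can land
 * one byte past the shm allocation once the summed lengths reach the
 * 256-byte minimum.  Reserving svbranch.len + 1 would close the gap.
 */
#include <stdio.h>
#include <stddef.h>

/* bytes actually written by the copy sequence in tmx_check_pretran() */
static size_t pretran_bytes_written(size_t callid, size_t ftag,
		size_t cseqnum, size_t cseqmet, size_t vbranch, int has_vbranch)
{
	size_t n = callid + 1 + ftag + 1 + cseqnum + 1 + cseqmet + 1;
	if (has_vbranch)
		n += vbranch + 1;	/* branch value plus its NUL terminator */
	return n;
}

/* bytes reserved by the sample above: +4 only, the vbranch NUL is missing */
static size_t pretran_bytes_reserved(size_t callid, size_t ftag,
		size_t cseqnum, size_t cseqmet, size_t vbranch, int has_vbranch)
{
	size_t n = callid + cseqnum + cseqmet + ftag + 4;
	if (has_vbranch)
		n += vbranch;		/* off by one: no room for the NUL */
	if (n < 256)
		n = 256;		/* floor hides the flaw for short messages */
	return n;
}

int main(void)
{
	/* arbitrary header lengths, chosen so the 256-byte floor is exceeded */
	size_t need = pretran_bytes_written(120, 60, 10, 6, 64, 1);
	size_t have = pretran_bytes_reserved(120, 60, 10, 6, 64, 1);

	printf("written=%zu reserved=%zu overflow=%s\n",
			need, have, need > have ? "yes" : "no");
	return 0;
}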
crossvul-cpp_data_good_550_0
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "opj_apps_config.h" #include "openjpeg.h" #include "color.h" #ifdef OPJ_HAVE_LIBLCMS2 #include <lcms2.h> #endif #ifdef OPJ_HAVE_LIBLCMS1 #include <lcms.h> #endif #ifdef OPJ_USE_LEGACY #define OPJ_CLRSPC_GRAY CLRSPC_GRAY #define OPJ_CLRSPC_SRGB CLRSPC_SRGB #endif /*-------------------------------------------------------- Matrix for sYCC, Amendment 1 to IEC 61966-2-1 Y : 0.299 0.587 0.114 :R Cb: -0.1687 -0.3312 0.5 :G Cr: 0.5 -0.4187 -0.0812 :B Inverse: R: 1 -3.68213e-05 1.40199 :Y G: 1.00003 -0.344125 -0.714128 :Cb - 2^(prec - 1) B: 0.999823 1.77204 -8.04142e-06 :Cr - 2^(prec - 1) -----------------------------------------------------------*/ static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr, int *out_r, int *out_g, int *out_b) { int r, g, b; cb -= offset; cr -= offset; r = y + (int)(1.402 * (float)cr); if (r < 0) { r = 0; } else if (r > upb) { r = upb; } *out_r = r; g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr); if (g < 0) { g = 0; } else if (g > upb) { g = upb; } *out_g = g; b = y + (int)(1.772 * (float)cb); if (b < 0) { b = 0; } else if (b > upb) { b = upb; } *out_b = b; } static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1 << (upb - 1); upb = (1 << upb) - 1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)opj_image_data_alloc(sizeof(int) * max); d1 = g = (int*)opj_image_data_alloc(sizeof(int) * max); d2 = b = (int*)opj_image_data_alloc(sizeof(int) * max); if (r == NULL || g == NULL || b == NULL) { goto fails; } for (i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } opj_image_data_free(img->comps[0].data); img->comps[0].data = d0; opj_image_data_free(img->comps[1].data); img->comps[1].data = d1; opj_image_data_free(img->comps[2].data); img->comps[2].data = d2; img->color_space = OPJ_CLRSPC_SRGB; return; fails: opj_image_data_free(r); opj_image_data_free(g); opj_image_data_free(b); }/* sycc444_to_rgb() */ static void sycc422_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, offx, loopmaxw; int offset, upb; size_t i; upb = (int)img->comps[0].prec; offset = 1 << (upb - 1); upb = (1 << upb) - 1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)opj_image_data_alloc(sizeof(int) * max); d1 = g = (int*)opj_image_data_alloc(sizeof(int) * max); d2 = b = (int*)opj_image_data_alloc(sizeof(int) * max); if (r == NULL || g == NULL || b == NULL) { goto fails; } /* if img->x0 is odd, then first column shall use Cb/Cr = 0 */ offx = img->x0 & 1U; loopmaxw = maxw - offx; for (i = 0U; i < maxh; ++i) { size_t j; if (offx > 0U) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; } for (j = 0U; j < (loopmaxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if (j < loopmaxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } } opj_image_data_free(img->comps[0].data); img->comps[0].data = d0; 
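/* swap in the remaining converted planes below and promote the chroma components to the full-resolution geometry of component 0 */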
opj_image_data_free(img->comps[1].data); img->comps[1].data = d1; opj_image_data_free(img->comps[2].data); img->comps[2].data = d2; img->comps[1].w = img->comps[2].w = img->comps[0].w; img->comps[1].h = img->comps[2].h = img->comps[0].h; img->comps[1].dx = img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[2].dy = img->comps[0].dy; img->color_space = OPJ_CLRSPC_SRGB; return; fails: opj_image_data_free(r); opj_image_data_free(g); opj_image_data_free(b); }/* sycc422_to_rgb() */ static void sycc420_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb; const int *y, *cb, *cr, *ny; size_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh; int offset, upb; size_t i; upb = (int)img->comps[0].prec; offset = 1 << (upb - 1); upb = (1 << upb) - 1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)opj_image_data_alloc(sizeof(int) * max); d1 = g = (int*)opj_image_data_alloc(sizeof(int) * max); d2 = b = (int*)opj_image_data_alloc(sizeof(int) * max); if (r == NULL || g == NULL || b == NULL) { goto fails; } /* if img->x0 is odd, then first column shall use Cb/Cr = 0 */ offx = img->x0 & 1U; loopmaxw = maxw - offx; /* if img->y0 is odd, then first line shall use Cb/Cr = 0 */ offy = img->y0 & 1U; loopmaxh = maxh - offy; if (offy > 0U) { size_t j; for (j = 0; j < maxw; ++j) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; } } for (i = 0U; i < (loopmaxh & ~(size_t)1U); i += 2U) { size_t j; ny = y + maxw; nr = r + maxw; ng = g + maxw; nb = b + maxw; if (offx > 0U) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; } for (j = 0; j < (loopmaxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } if (j < loopmaxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } y += maxw; r += maxw; g += maxw; b += maxw; } if (i < loopmaxh) { size_t j; for (j = 0U; j < (maxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if (j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); } } opj_image_data_free(img->comps[0].data); img->comps[0].data = d0; opj_image_data_free(img->comps[1].data); img->comps[1].data = d1; opj_image_data_free(img->comps[2].data); img->comps[2].data = d2; img->comps[1].w = img->comps[2].w = img->comps[0].w; img->comps[1].h = img->comps[2].h = img->comps[0].h; img->comps[1].dx = img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[2].dy = img->comps[0].dy; img->color_space = OPJ_CLRSPC_SRGB; return; fails: opj_image_data_free(r); opj_image_data_free(g); opj_image_data_free(b); }/* sycc420_to_rgb() */ void color_sycc_to_rgb(opj_image_t *img) { if (img->numcomps < 3) { img->color_space = OPJ_CLRSPC_GRAY; return; } if ((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 2) && (img->comps[2].dy == 2)) { /* horizontal and vertical sub-sample 
*/ sycc420_to_rgb(img); } else if ((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1)) { /* horizontal sub-sample only */ sycc422_to_rgb(img); } else if ((img->comps[0].dx == 1) && (img->comps[1].dx == 1) && (img->comps[2].dx == 1) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1)) { /* no sub-sample */ sycc444_to_rgb(img); } else { fprintf(stderr, "%s:%d:color_sycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__, __LINE__); return; } }/* color_sycc_to_rgb() */ #if defined(OPJ_HAVE_LIBLCMS2) || defined(OPJ_HAVE_LIBLCMS1) #ifdef OPJ_HAVE_LIBLCMS1 /* Bob Friesenhahn proposed:*/ #define cmsSigXYZData icSigXYZData #define cmsSigLabData icSigLabData #define cmsSigCmykData icSigCmykData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLuvData icSigLuvData #define cmsSigGrayData icSigGrayData #define cmsSigRgbData icSigRgbData #define cmsUInt32Number DWORD #define cmsColorSpaceSignature icColorSpaceSignature #define cmsGetHeaderRenderingIntent cmsTakeRenderingIntent #endif /* OPJ_HAVE_LIBLCMS1 */ /*#define DEBUG_PROFILE*/ void color_apply_icc_profile(opj_image_t *image) { cmsHPROFILE in_prof, out_prof; cmsHTRANSFORM transform; cmsColorSpaceSignature in_space, out_space; cmsUInt32Number intent, in_type, out_type; int *r, *g, *b; size_t nr_samples, i, max, max_w, max_h; int prec, ok = 0; OPJ_COLOR_SPACE new_space; in_prof = cmsOpenProfileFromMem(image->icc_profile_buf, image->icc_profile_len); #ifdef DEBUG_PROFILE FILE *icm = fopen("debug.icm", "wb"); fwrite(image->icc_profile_buf, 1, image->icc_profile_len, icm); fclose(icm); #endif if (in_prof == NULL) { return; } in_space = cmsGetPCS(in_prof); out_space = cmsGetColorSpace(in_prof); intent = cmsGetHeaderRenderingIntent(in_prof); max_w = image->comps[0].w; max_h = image->comps[0].h; prec = (int)image->comps[0].prec; if (out_space == cmsSigRgbData) { /* enumCS 16 */ unsigned int i, nr_comp = image->numcomps; if (nr_comp > 4) { nr_comp = 4; } for (i = 1; i < nr_comp; ++i) { /* AFL test */ if (image->comps[0].dx != image->comps[i].dx) { break; } if (image->comps[0].dy != image->comps[i].dy) { break; } if (image->comps[0].prec != image->comps[i].prec) { break; } if (image->comps[0].sgnd != image->comps[i].sgnd) { break; } } if (i != nr_comp) { cmsCloseProfile(in_prof); return; } if (prec <= 8) { in_type = TYPE_RGB_8; out_type = TYPE_RGB_8; } else { in_type = TYPE_RGB_16; out_type = TYPE_RGB_16; } out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if (out_space == cmsSigGrayData) { /* enumCS 17 */ in_type = TYPE_GRAY_8; out_type = TYPE_RGB_8; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if (out_space == cmsSigYCbCrData) { /* enumCS 18 */ in_type = TYPE_YCbCr_16; out_type = TYPE_RGB_16; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else { #ifdef DEBUG_PROFILE fprintf(stderr, "%s:%d: color_apply_icc_profile\n\tICC Profile has unknown " "output colorspace(%#x)(%c%c%c%c)\n\tICC Profile ignored.\n", __FILE__, __LINE__, out_space, (out_space >> 24) & 0xff, (out_space >> 16) & 0xff, (out_space >> 8) & 0xff, out_space & 0xff); #endif cmsCloseProfile(in_prof); return; } if (out_prof == NULL) { cmsCloseProfile(in_prof); return; } #ifdef DEBUG_PROFILE fprintf(stderr, "%s:%d:color_apply_icc_profile\n\tchannels(%d) prec(%d) w(%d) h(%d)" "\n\tprofile: in(%p) out(%p)\n", __FILE__, __LINE__, image->numcomps, prec, max_w, max_h, (void*)in_prof, (void*)out_prof); 
fprintf(stderr, "\trender_intent (%u)\n\t" "color_space: in(%#x)(%c%c%c%c) out:(%#x)(%c%c%c%c)\n\t" " type: in(%u) out:(%u)\n", intent, in_space, (in_space >> 24) & 0xff, (in_space >> 16) & 0xff, (in_space >> 8) & 0xff, in_space & 0xff, out_space, (out_space >> 24) & 0xff, (out_space >> 16) & 0xff, (out_space >> 8) & 0xff, out_space & 0xff, in_type, out_type ); #else (void)prec; (void)in_space; #endif /* DEBUG_PROFILE */ transform = cmsCreateTransform(in_prof, in_type, out_prof, out_type, intent, 0); #ifdef OPJ_HAVE_LIBLCMS2 /* Possible for: LCMS_VERSION >= 2000 :*/ cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if (transform == NULL) { #ifdef DEBUG_PROFILE fprintf(stderr, "%s:%d:color_apply_icc_profile\n\tcmsCreateTransform failed. " "ICC Profile ignored.\n", __FILE__, __LINE__); #endif #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif return; } if (image->numcomps > 2) { /* RGB, RGBA */ if ((image->comps[0].w == image->comps[1].w && image->comps[0].w == image->comps[2].w) && (image->comps[0].h == image->comps[1].h && image->comps[0].h == image->comps[2].h)) { if (prec <= 8) { unsigned char *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3U * sizeof(unsigned char)); in = inbuf = (unsigned char*)opj_image_data_alloc(nr_samples); out = outbuf = (unsigned char*)opj_image_data_alloc(nr_samples); if (inbuf == NULL || outbuf == NULL) { goto fails0; } r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0U; i < max; ++i) { *in++ = (unsigned char) * r++; *in++ = (unsigned char) * g++; *in++ = (unsigned char) * b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0U; i < max; ++i) { *r++ = (int) * out++; *g++ = (int) * out++; *b++ = (int) * out++; } ok = 1; fails0: opj_image_data_free(inbuf); opj_image_data_free(outbuf); } else { /* prec > 8 */ unsigned short *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3U * sizeof(unsigned short)); in = inbuf = (unsigned short*)opj_image_data_alloc(nr_samples); out = outbuf = (unsigned short*)opj_image_data_alloc(nr_samples); if (inbuf == NULL || outbuf == NULL) { goto fails1; } r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0U ; i < max; ++i) { *in++ = (unsigned short) * r++; *in++ = (unsigned short) * g++; *in++ = (unsigned short) * b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0; i < max; ++i) { *r++ = (int) * out++; *g++ = (int) * out++; *b++ = (int) * out++; } ok = 1; fails1: opj_image_data_free(inbuf); opj_image_data_free(outbuf); } } else { fprintf(stderr, "[ERROR] Image components should have the same width and height\n"); cmsDeleteTransform(transform); return; } } else { /* image->numcomps <= 2 : GRAY, GRAYA */ if (prec <= 8) { unsigned char *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)opj_image_data_alloc(nr_samples); out = outbuf = (unsigned char*)opj_image_data_alloc(nr_samples); g = (int*)opj_image_data_alloc((size_t)max * sizeof(int)); b = (int*)opj_image_data_alloc((size_t)max * sizeof(int)); if (inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) { goto fails2; } new_comps = (opj_image_comp_t*)realloc(image->comps, (image->numcomps 
+ 2) * sizeof(opj_image_comp_t)); if (new_comps == NULL) { goto fails2; } image->comps = new_comps; if (image->numcomps == 2) { image->comps[3] = image->comps[1]; } image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for (i = 0U; i < max; ++i) { *in++ = (unsigned char) * r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0U; i < max; ++i) { *r++ = (int) * out++; *g++ = (int) * out++; *b++ = (int) * out++; } r = g = b = NULL; ok = 1; fails2: opj_image_data_free(inbuf); opj_image_data_free(outbuf); opj_image_data_free(g); opj_image_data_free(b); } else { /* prec > 8 */ unsigned short *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3U * sizeof(unsigned short)); in = inbuf = (unsigned short*)opj_image_data_alloc(nr_samples); out = outbuf = (unsigned short*)opj_image_data_alloc(nr_samples); g = (int*)opj_image_data_alloc((size_t)max * sizeof(int)); b = (int*)opj_image_data_alloc((size_t)max * sizeof(int)); if (inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) { goto fails3; } new_comps = (opj_image_comp_t*)realloc(image->comps, (image->numcomps + 2) * sizeof(opj_image_comp_t)); if (new_comps == NULL) { goto fails3; } image->comps = new_comps; if (image->numcomps == 2) { image->comps[3] = image->comps[1]; } image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for (i = 0U; i < max; ++i) { *in++ = (unsigned short) * r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for (i = 0; i < max; ++i) { *r++ = (int) * out++; *g++ = (int) * out++; *b++ = (int) * out++; } r = g = b = NULL; ok = 1; fails3: opj_image_data_free(inbuf); opj_image_data_free(outbuf); opj_image_data_free(g); opj_image_data_free(b); } }/* if(image->numcomps > 2) */ cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if (ok) { image->color_space = new_space; } }/* color_apply_icc_profile() */ static int are_comps_same_dimensions(opj_image_t * image) { unsigned int i; for (i = 1; i < image->numcomps; i++) { if (image->comps[0].dx != image->comps[i].dx || image->comps[0].dy != image->comps[i].dy) { return OPJ_FALSE; } } return OPJ_TRUE; } void color_cielab_to_rgb(opj_image_t *image) { int *row; int enumcs, numcomps; OPJ_COLOR_SPACE new_space; numcomps = (int)image->numcomps; if (numcomps != 3) { fprintf(stderr, "%s:%d:\n\tnumcomps %d not handled. Quitting.\n", __FILE__, __LINE__, numcomps); return; } if (!are_comps_same_dimensions(image)) { fprintf(stderr, "%s:%d:\n\tcomponents are not all of the same dimension. 
Quitting.\n", __FILE__, __LINE__); return; } row = (int*)image->icc_profile_buf; enumcs = row[0]; if (enumcs == 14) { /* CIELab */ int *L, *a, *b, *red, *green, *blue; int *src0, *src1, *src2, *dst0, *dst1, *dst2; double rl, ol, ra, oa, rb, ob, prec0, prec1, prec2; double minL, maxL, mina, maxa, minb, maxb; unsigned int default_type; unsigned int i, max; cmsHPROFILE in, out; cmsHTRANSFORM transform; cmsUInt16Number RGB[3]; cmsCIELab Lab; in = cmsCreateLab4Profile(NULL); if (in == NULL) { return; } out = cmsCreate_sRGBProfile(); if (out == NULL) { cmsCloseProfile(in); return; } transform = cmsCreateTransform(in, TYPE_Lab_DBL, out, TYPE_RGB_16, INTENT_PERCEPTUAL, 0); #ifdef OPJ_HAVE_LIBLCMS2 cmsCloseProfile(in); cmsCloseProfile(out); #endif if (transform == NULL) { #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif return; } new_space = OPJ_CLRSPC_SRGB; prec0 = (double)image->comps[0].prec; prec1 = (double)image->comps[1].prec; prec2 = (double)image->comps[2].prec; default_type = (unsigned int)row[1]; if (default_type == 0x44454600) { /* DEF : default */ rl = 100; ra = 170; rb = 200; ol = 0; oa = pow(2, prec1 - 1); ob = pow(2, prec2 - 2) + pow(2, prec2 - 3); } else { rl = row[2]; ra = row[4]; rb = row[6]; ol = row[3]; oa = row[5]; ob = row[7]; } L = src0 = image->comps[0].data; a = src1 = image->comps[1].data; b = src2 = image->comps[2].data; max = image->comps[0].w * image->comps[0].h; red = dst0 = (int*)opj_image_data_alloc(max * sizeof(int)); green = dst1 = (int*)opj_image_data_alloc(max * sizeof(int)); blue = dst2 = (int*)opj_image_data_alloc(max * sizeof(int)); if (red == NULL || green == NULL || blue == NULL) { goto fails; } minL = -(rl * ol) / (pow(2, prec0) - 1); maxL = minL + rl; mina = -(ra * oa) / (pow(2, prec1) - 1); maxa = mina + ra; minb = -(rb * ob) / (pow(2, prec2) - 1); maxb = minb + rb; for (i = 0; i < max; ++i) { Lab.L = minL + (double)(*L) * (maxL - minL) / (pow(2, prec0) - 1); ++L; Lab.a = mina + (double)(*a) * (maxa - mina) / (pow(2, prec1) - 1); ++a; Lab.b = minb + (double)(*b) * (maxb - minb) / (pow(2, prec2) - 1); ++b; cmsDoTransform(transform, &Lab, RGB, 1); *red++ = RGB[0]; *green++ = RGB[1]; *blue++ = RGB[2]; } cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif opj_image_data_free(src0); image->comps[0].data = dst0; opj_image_data_free(src1); image->comps[1].data = dst1; opj_image_data_free(src2); image->comps[2].data = dst2; image->color_space = new_space; image->comps[0].prec = 16; image->comps[1].prec = 16; image->comps[2].prec = 16; return; fails: cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif if (red) { opj_image_data_free(red); } if (green) { opj_image_data_free(green); } if (blue) { opj_image_data_free(blue); } return; } fprintf(stderr, "%s:%d:\n\tenumCS %d not handled. 
Ignoring.\n", __FILE__, __LINE__, enumcs); }/* color_cielab_to_rgb() */ #endif /* OPJ_HAVE_LIBLCMS2 || OPJ_HAVE_LIBLCMS1 */ void color_cmyk_to_rgb(opj_image_t *image) { float C, M, Y, K; float sC, sM, sY, sK; unsigned int w, h, max, i; w = image->comps[0].w; h = image->comps[0].h; if ( (image->numcomps < 4) || (image->comps[0].dx != image->comps[1].dx) || (image->comps[0].dx != image->comps[2].dx) || (image->comps[0].dx != image->comps[3].dx) || (image->comps[0].dy != image->comps[1].dy) || (image->comps[0].dy != image->comps[2].dy) || (image->comps[0].dy != image->comps[3].dy) ) { fprintf(stderr, "%s:%d:color_cmyk_to_rgb\n\tCAN NOT CONVERT\n", __FILE__, __LINE__); return; } max = w * h; sC = 1.0F / (float)((1 << image->comps[0].prec) - 1); sM = 1.0F / (float)((1 << image->comps[1].prec) - 1); sY = 1.0F / (float)((1 << image->comps[2].prec) - 1); sK = 1.0F / (float)((1 << image->comps[3].prec) - 1); for (i = 0; i < max; ++i) { /* CMYK values from 0 to 1 */ C = (float)(image->comps[0].data[i]) * sC; M = (float)(image->comps[1].data[i]) * sM; Y = (float)(image->comps[2].data[i]) * sY; K = (float)(image->comps[3].data[i]) * sK; /* Invert all CMYK values */ C = 1.0F - C; M = 1.0F - M; Y = 1.0F - Y; K = 1.0F - K; /* CMYK -> RGB : RGB results from 0 to 255 */ image->comps[0].data[i] = (int)(255.0F * C * K); /* R */ image->comps[1].data[i] = (int)(255.0F * M * K); /* G */ image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */ } opj_image_data_free(image->comps[3].data); image->comps[3].data = NULL; image->comps[0].prec = 8; image->comps[1].prec = 8; image->comps[2].prec = 8; image->numcomps -= 1; image->color_space = OPJ_CLRSPC_SRGB; for (i = 3; i < image->numcomps; ++i) { memcpy(&(image->comps[i]), &(image->comps[i + 1]), sizeof(image->comps[i])); } }/* color_cmyk_to_rgb() */ /* * This code has been adopted from sjpx_openjpeg.c of ghostscript */ void color_esycc_to_rgb(opj_image_t *image) { int y, cb, cr, sign1, sign2, val; unsigned int w, h, max, i; int flip_value = (1 << (image->comps[0].prec - 1)); int max_value = (1 << image->comps[0].prec) - 1; if ( (image->numcomps < 3) || (image->comps[0].dx != image->comps[1].dx) || (image->comps[0].dx != image->comps[2].dx) || (image->comps[0].dy != image->comps[1].dy) || (image->comps[0].dy != image->comps[2].dy) ) { fprintf(stderr, "%s:%d:color_esycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__, __LINE__); return; } w = image->comps[0].w; h = image->comps[0].h; sign1 = (int)image->comps[1].sgnd; sign2 = (int)image->comps[2].sgnd; max = w * h; for (i = 0; i < max; ++i) { y = image->comps[0].data[i]; cb = image->comps[1].data[i]; cr = image->comps[2].data[i]; if (!sign1) { cb -= flip_value; } if (!sign2) { cr -= flip_value; } val = (int) ((float)y - (float)0.0000368 * (float)cb + (float)1.40199 * (float)cr + (float)0.5); if (val > max_value) { val = max_value; } else if (val < 0) { val = 0; } image->comps[0].data[i] = val; val = (int) ((float)1.0003 * (float)y - (float)0.344125 * (float)cb - (float)0.7141128 * (float)cr + (float)0.5); if (val > max_value) { val = max_value; } else if (val < 0) { val = 0; } image->comps[1].data[i] = val; val = (int) ((float)0.999823 * (float)y + (float)1.77204 * (float)cb - (float)0.000008 * (float)cr + (float)0.5); if (val > max_value) { val = max_value; } else if (val < 0) { val = 0; } image->comps[2].data[i] = val; } image->color_space = OPJ_CLRSPC_SRGB; }/* color_esycc_to_rgb() */
./CrossVul/dataset_final_sorted/CWE-119/c/good_550_0
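/*
 * A self-contained sketch (not part of the dataset files; the pixel values
 * and the clampi() helper are made up) exercising the same sYCC -> RGB
 * constants as sycc_to_rgb() in good_550_0 above on a single 8-bit pixel:
 * Cb/Cr are recentred by offset = 2^(prec-1) and each result is clamped to
 * upb = 2^prec - 1.
 */
#include <stdio.h>

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int prec = 8;
	int offset = 1 << (prec - 1);	/* 128 */
	int upb = (1 << prec) - 1;	/* 255 */
	int y = 120, cb = 200, cr = 90;	/* arbitrary test pixel */
	int r, g, b;

	cb -= offset;
	cr -= offset;
	r = clampi(y + (int)(1.402f * (float)cr), 0, upb);
	g = clampi(y - (int)(0.344f * (float)cb + 0.714f * (float)cr), 0, upb);
	b = clampi(y + (int)(1.772f * (float)cb), 0, upb);

	printf("R=%d G=%d B=%d\n", r, g, b);	/* R=67 G=122 B=247 */
	return 0;
}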
crossvul-cpp_data_bad_5475_1
/* $Id$ */ /* * Copyright (c) 1996-1997 Sam Leffler * Copyright (c) 1996 Pixar * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Pixar, Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Pixar, Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL PIXAR, SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include "tiffiop.h" #ifdef PIXARLOG_SUPPORT /* * TIFF Library. * PixarLog Compression Support * * Contributed by Dan McCoy. * * PixarLog film support uses the TIFF library to store companded * 11 bit values into a tiff file, which are compressed using the * zip compressor. * * The codec can take as input and produce as output 32-bit IEEE float values * as well as 16-bit or 8-bit unsigned integer values. * * On writing any of the above are converted into the internal * 11-bit log format. In the case of 8 and 16 bit values, the * input is assumed to be unsigned linear color values that represent * the range 0-1. In the case of IEEE values, the 0-1 range is assumed to * be the normal linear color range, in addition over 1 values are * accepted up to a value of about 25.0 to encode "hot" highlights and such. * The encoding is lossless for 8-bit values, slightly lossy for the * other bit depths. The actual color precision should be better * than the human eye can perceive with extra room to allow for * error introduced by further image computation. As with any quantized * color format, it is possible to perform image calculations which * expose the quantization error. This format should certainly be less * susceptible to such errors than standard 8-bit encodings, but more * susceptible than straight 16-bit or 32-bit encodings. * * On reading the internal format is converted to the desired output format. * The program can request which format it desires by setting the internal * pseudo tag TIFFTAG_PIXARLOGDATAFMT to one of these possible values: * PIXARLOGDATAFMT_FLOAT = provide IEEE float values. * PIXARLOGDATAFMT_16BIT = provide unsigned 16-bit integer values * PIXARLOGDATAFMT_8BIT = provide unsigned 8-bit integer values * * alternately PIXARLOGDATAFMT_8BITABGR provides unsigned 8-bit integer * values with the difference that if there are exactly three or four channels * (rgb or rgba) it swaps the channel order (bgr or abgr). * * PIXARLOGDATAFMT_11BITLOG provides the internal encoding directly * packed in 16-bit values. However no tools are supplied for interpreting * these values. * * "hot" (over 1.0) areas written in floating point get clamped to * 1.0 in the integer data types. 
 *
 * When the file is closed after writing, the bit depth and sample format
 * are always set to appear as if 8-bit data has been written into it.
 * That way a naive program unaware of the particulars of the encoding
 * gets the format it is most likely able to handle.
 *
 * The codec does its own horizontal differencing step on the coded
 * values so the library's predictor stuff should be turned off.
 * The codec also handles byte swapping the encoded values as necessary
 * since the library does not have the information necessary
 * to know the bit depth of the raw unencoded buffer.
 *
 * NOTE: This decoder does not appear to update tif_rawcp, and tif_rawcc.
 * This can cause problems with the implementation of CHUNKY_STRIP_READ_SUPPORT
 * as noted in http://trac.osgeo.org/gdal/ticket/3894. FrankW - Jan'11
 */

#include "tif_predict.h"
#include "zlib.h"

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Tables for converting to/from 11 bit coded values */

#define TSIZE 2048		/* decode table size (11-bit tokens) */
#define TSIZEP1 2049		/* Plus one for slop */
#define ONE 1250		/* token value of 1.0 exactly */
#define RATIO 1.004		/* nominal ratio for log part */

#define CODE_MASK 0x7ff		/* 11 bits. */

static float Fltsize;
static float LogK1, LogK2;

#define REPEAT(n, op) { int i; i=n; do { i--; op; } while (i>0); }

static void
horizontalAccumulateF(uint16 *wp, int n, int stride, float *op,
	float *ToLinearF)
{
    register unsigned int cr, cg, cb, ca, mask;
    register float t0, t1, t2, t3;

    if (n >= stride) {
	mask = CODE_MASK;
	if (stride == 3) {
	    t0 = ToLinearF[cr = (wp[0] & mask)];
	    t1 = ToLinearF[cg = (wp[1] & mask)];
	    t2 = ToLinearF[cb = (wp[2] & mask)];
	    op[0] = t0;
	    op[1] = t1;
	    op[2] = t2;
	    n -= 3;
	    while (n > 0) {
		wp += 3;
		op += 3;
		n -= 3;
		t0 = ToLinearF[(cr += wp[0]) & mask];
		t1 = ToLinearF[(cg += wp[1]) & mask];
		t2 = ToLinearF[(cb += wp[2]) & mask];
		op[0] = t0;
		op[1] = t1;
		op[2] = t2;
	    }
	} else if (stride == 4) {
	    t0 = ToLinearF[cr = (wp[0] & mask)];
	    t1 = ToLinearF[cg = (wp[1] & mask)];
	    t2 = ToLinearF[cb = (wp[2] & mask)];
	    t3 = ToLinearF[ca = (wp[3] & mask)];
	    op[0] = t0;
	    op[1] = t1;
	    op[2] = t2;
	    op[3] = t3;
	    n -= 4;
	    while (n > 0) {
		wp += 4;
		op += 4;
		n -= 4;
		t0 = ToLinearF[(cr += wp[0]) & mask];
		t1 = ToLinearF[(cg += wp[1]) & mask];
		t2 = ToLinearF[(cb += wp[2]) & mask];
		t3 = ToLinearF[(ca += wp[3]) & mask];
		op[0] = t0;
		op[1] = t1;
		op[2] = t2;
		op[3] = t3;
	    }
	} else {
	    REPEAT(stride, *op = ToLinearF[*wp&mask]; wp++; op++)
	    n -= stride;
	    while (n > 0) {
		REPEAT(stride,
		    wp[stride] += *wp; *op = ToLinearF[*wp&mask]; wp++; op++)
		n -= stride;
	    }
	}
    }
}

static void
horizontalAccumulate12(uint16 *wp, int n, int stride, int16 *op,
	float *ToLinearF)
{
    register unsigned int cr, cg, cb, ca, mask;
    register float t0, t1, t2, t3;

#define SCALE12 2048.0F
#define CLAMP12(t) (((t) < 3071) ?
(uint16) (t) : 3071) if (n >= stride) { mask = CODE_MASK; if (stride == 3) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); } } else if (stride == 4) { t0 = ToLinearF[cr = (wp[0] & mask)] * SCALE12; t1 = ToLinearF[cg = (wp[1] & mask)] * SCALE12; t2 = ToLinearF[cb = (wp[2] & mask)] * SCALE12; t3 = ToLinearF[ca = (wp[3] & mask)] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; t0 = ToLinearF[(cr += wp[0]) & mask] * SCALE12; t1 = ToLinearF[(cg += wp[1]) & mask] * SCALE12; t2 = ToLinearF[(cb += wp[2]) & mask] * SCALE12; t3 = ToLinearF[(ca += wp[3]) & mask] * SCALE12; op[0] = CLAMP12(t0); op[1] = CLAMP12(t1); op[2] = CLAMP12(t2); op[3] = CLAMP12(t3); } } else { REPEAT(stride, t0 = ToLinearF[*wp&mask] * SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; t0 = ToLinearF[wp[stride]&mask]*SCALE12; *op = CLAMP12(t0); wp++; op++) n -= stride; } } } } static void horizontalAccumulate16(uint16 *wp, int n, int stride, uint16 *op, uint16 *ToLinear16) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear16[cr = (wp[0] & mask)]; op[1] = ToLinear16[cg = (wp[1] & mask)]; op[2] = ToLinear16[cb = (wp[2] & mask)]; op[3] = ToLinear16[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = ToLinear16[(cr += wp[0]) & mask]; op[1] = ToLinear16[(cg += wp[1]) & mask]; op[2] = ToLinear16[(cb += wp[2]) & mask]; op[3] = ToLinear16[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear16[*wp&mask]; wp++; op++) n -= stride; } } } } /* * Returns the log encoded 11-bit values with the horizontal * differencing undone. 
*/ static void horizontalAccumulate11(uint16 *wp, int n, int stride, uint16 *op) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; cr = wp[0]; cg = wp[1]; cb = wp[2]; n -= 3; while (n > 0) { wp += 3; op += 3; n -= 3; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); } } else if (stride == 4) { op[0] = wp[0]; op[1] = wp[1]; op[2] = wp[2]; op[3] = wp[3]; cr = wp[0]; cg = wp[1]; cb = wp[2]; ca = wp[3]; n -= 4; while (n > 0) { wp += 4; op += 4; n -= 4; op[0] = (uint16)((cr += wp[0]) & mask); op[1] = (uint16)((cg += wp[1]) & mask); op[2] = (uint16)((cb += wp[2]) & mask); op[3] = (uint16)((ca += wp[3]) & mask); } } else { REPEAT(stride, *op = *wp&mask; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = *wp&mask; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 3; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; } } else if (stride == 4) { op[0] = ToLinear8[cr = (wp[0] & mask)]; op[1] = ToLinear8[cg = (wp[1] & mask)]; op[2] = ToLinear8[cb = (wp[2] & mask)]; op[3] = ToLinear8[ca = (wp[3] & mask)]; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; op[0] = ToLinear8[(cr += wp[0]) & mask]; op[1] = ToLinear8[(cg += wp[1]) & mask]; op[2] = ToLinear8[(cb += wp[2]) & mask]; op[3] = ToLinear8[(ca += wp[3]) & mask]; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } static void horizontalAccumulate8abgr(uint16 *wp, int n, int stride, unsigned char *op, unsigned char *ToLinear8) { register unsigned int cr, cg, cb, ca, mask; register unsigned char t0, t1, t2, t3; if (n >= stride) { mask = CODE_MASK; if (stride == 3) { op[0] = 0; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[1] = t1; op[2] = t2; op[3] = t3; n -= 3; while (n > 0) { n -= 3; wp += 3; op += 4; op[0] = 0; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[1] = t1; op[2] = t2; op[3] = t3; } } else if (stride == 4) { t0 = ToLinear8[ca = (wp[3] & mask)]; t1 = ToLinear8[cb = (wp[2] & mask)]; t2 = ToLinear8[cg = (wp[1] & mask)]; t3 = ToLinear8[cr = (wp[0] & mask)]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; n -= 4; while (n > 0) { n -= 4; wp += 4; op += 4; t0 = ToLinear8[(ca += wp[3]) & mask]; t1 = ToLinear8[(cb += wp[2]) & mask]; t2 = ToLinear8[(cg += wp[1]) & mask]; t3 = ToLinear8[(cr += wp[0]) & mask]; op[0] = t0; op[1] = t1; op[2] = t2; op[3] = t3; } } else { REPEAT(stride, *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; while (n > 0) { REPEAT(stride, wp[stride] += *wp; *op = ToLinear8[*wp&mask]; wp++; op++) n -= stride; } } } } /* * State block for each open TIFF * file using PixarLog compression/decompression. 
*/ typedef struct { TIFFPredictorState predict; z_stream stream; tmsize_t tbuf_size; /* only set/used on reading for now */ uint16 *tbuf; uint16 stride; int state; int user_datafmt; int quality; #define PLSTATE_INIT 1 TIFFVSetMethod vgetparent; /* super-class method */ TIFFVSetMethod vsetparent; /* super-class method */ float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; } PixarLogState; static int PixarLogMakeTables(PixarLogState *sp) { /* * We make several tables here to convert between various external * representations (float, 16-bit, and 8-bit) and the internal * 11-bit companded representation. The 11-bit representation has two * distinct regions. A linear bottom end up through .018316 in steps * of about .000073, and a region of constant ratio up to about 25. * These floating point numbers are stored in the main table ToLinearF. * All other tables are derived from this one. The tables (and the * ratios) are continuous at the internal seam. */ int nlin, lt2size; int i, j; double b, c, linstep, v; float *ToLinearF; uint16 *ToLinear16; unsigned char *ToLinear8; uint16 *FromLT2; uint16 *From14; /* Really for 16-bit data, but we shift down 2 */ uint16 *From8; c = log(RATIO); nlin = (int)(1./c); /* nlin must be an integer */ c = 1./nlin; b = exp(-c*ONE); /* multiplicative scale factor [b*exp(c*ONE) = 1] */ linstep = b*c*exp(1.); LogK1 = (float)(1./c); /* if (v >= 2) token = k1*log(v*k2) */ LogK2 = (float)(1./b); lt2size = (int)(2./linstep) + 1; FromLT2 = (uint16 *)_TIFFmalloc(lt2size*sizeof(uint16)); From14 = (uint16 *)_TIFFmalloc(16384*sizeof(uint16)); From8 = (uint16 *)_TIFFmalloc(256*sizeof(uint16)); ToLinearF = (float *)_TIFFmalloc(TSIZEP1 * sizeof(float)); ToLinear16 = (uint16 *)_TIFFmalloc(TSIZEP1 * sizeof(uint16)); ToLinear8 = (unsigned char *)_TIFFmalloc(TSIZEP1 * sizeof(unsigned char)); if (FromLT2 == NULL || From14 == NULL || From8 == NULL || ToLinearF == NULL || ToLinear16 == NULL || ToLinear8 == NULL) { if (FromLT2) _TIFFfree(FromLT2); if (From14) _TIFFfree(From14); if (From8) _TIFFfree(From8); if (ToLinearF) _TIFFfree(ToLinearF); if (ToLinear16) _TIFFfree(ToLinear16); if (ToLinear8) _TIFFfree(ToLinear8); sp->FromLT2 = NULL; sp->From14 = NULL; sp->From8 = NULL; sp->ToLinearF = NULL; sp->ToLinear16 = NULL; sp->ToLinear8 = NULL; return 0; } j = 0; for (i = 0; i < nlin; i++) { v = i * linstep; ToLinearF[j++] = (float)v; } for (i = nlin; i < TSIZE; i++) ToLinearF[j++] = (float)(b*exp(c*i)); ToLinearF[2048] = ToLinearF[2047]; for (i = 0; i < TSIZEP1; i++) { v = ToLinearF[i]*65535.0 + 0.5; ToLinear16[i] = (v > 65535.0) ? 65535 : (uint16)v; v = ToLinearF[i]*255.0 + 0.5; ToLinear8[i] = (v > 255.0) ? 255 : (unsigned char)v; } j = 0; for (i = 0; i < lt2size; i++) { if ((i*linstep)*(i*linstep) > ToLinearF[j]*ToLinearF[j+1]) j++; FromLT2[i] = (uint16)j; } /* * Since we lose info anyway on 16-bit data, we set up a 14-bit * table and shift 16-bit values down two bits on input. * saves a little table space. */ j = 0; for (i = 0; i < 16384; i++) { while ((i/16383.)*(i/16383.) > ToLinearF[j]*ToLinearF[j+1]) j++; From14[i] = (uint16)j; } j = 0; for (i = 0; i < 256; i++) { while ((i/255.)*(i/255.) 
> ToLinearF[j]*ToLinearF[j+1]) j++; From8[i] = (uint16)j; } Fltsize = (float)(lt2size/2); sp->ToLinearF = ToLinearF; sp->ToLinear16 = ToLinear16; sp->ToLinear8 = ToLinear8; sp->FromLT2 = FromLT2; sp->From14 = From14; sp->From8 = From8; return 1; } #define DecoderState(tif) ((PixarLogState*) (tif)->tif_data) #define EncoderState(tif) ((PixarLogState*) (tif)->tif_data) static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s); #define PIXARLOGDATAFMT_UNKNOWN -1 static int PixarLogGuessDataFmt(TIFFDirectory *td) { int guess = PIXARLOGDATAFMT_UNKNOWN; int format = td->td_sampleformat; /* If the user didn't tell us his datafmt, * take our best guess from the bitspersample. */ switch (td->td_bitspersample) { case 32: if (format == SAMPLEFORMAT_IEEEFP) guess = PIXARLOGDATAFMT_FLOAT; break; case 16: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_16BIT; break; case 12: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_INT) guess = PIXARLOGDATAFMT_12BITPICIO; break; case 11: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_11BITLOG; break; case 8: if (format == SAMPLEFORMAT_VOID || format == SAMPLEFORMAT_UINT) guess = PIXARLOGDATAFMT_8BIT; break; } return guess; } static tmsize_t multiply_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 * m2; if (m1 && bytes / m1 != m2) bytes = 0; return bytes; } static tmsize_t add_ms(tmsize_t m1, tmsize_t m2) { tmsize_t bytes = m1 + m2; /* if either input is zero, assume overflow already occurred */ if (m1 == 0 || m2 == 0) bytes = 0; else if (bytes <= m1 || bytes <= m2) bytes = 0; return bytes; } static int PixarLogFixupTags(TIFF* tif) { (void) tif; return (1); } static int PixarLogSetupDecode(TIFF* tif) { static const char module[] = "PixarLogSetupDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* Make sure no byte swapping happens on the data * after decompression. */ tif->tif_postdecode = _TIFFNoPostDecode; /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); /* add one more stride in case input ends mid-stride */ tbuf_size = add_ms(tbuf_size, sizeof(uint16) * sp->stride); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); sp->tbuf_size = tbuf_size; if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle bits depth/data format combination (depth: %d)", td->td_bitspersample); return (0); } if (inflateInit(&sp->stream) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Setup state for decoding a strip. 
*/ static int PixarLogPreDecode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreDecode"; PixarLogState* sp = DecoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_in = tif->tif_rawdata; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) tif->tif_rawcc; if ((tmsize_t)sp->stream.avail_in != tif->tif_rawcc) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (inflateReset(&sp->stream) == Z_OK); } static int PixarLogDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s) { static const char module[] = "PixarLogDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t i; tmsize_t nsamples; int llen; uint16 *up; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: nsamples = occ / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: nsamples = occ / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: nsamples = occ; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; (void) s; assert(sp != NULL); sp->stream.next_out = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt) (nsamples * sizeof(uint16)); if (sp->stream.avail_out != nsamples * sizeof(uint16)) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } /* Check that we will not fill more than what was allocated */ if ((tmsize_t)sp->stream.avail_out > sp->tbuf_size) { TIFFErrorExt(tif->tif_clientdata, module, "sp->stream.avail_out > sp->tbuf_size"); return (0); } do { int state = inflate(&sp->stream, Z_PARTIAL_FLUSH); if (state == Z_STREAM_END) { break; /* XXX */ } if (state == Z_DATA_ERROR) { TIFFErrorExt(tif->tif_clientdata, module, "Decoding error at scanline %lu, %s", (unsigned long) tif->tif_row, sp->stream.msg ? sp->stream.msg : "(null)"); if (inflateSync(&sp->stream) != Z_OK) return (0); continue; } if (state != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (sp->stream.avail_out > 0); /* hopefully, we got all the bytes we needed */ if (sp->stream.avail_out != 0) { TIFFErrorExt(tif->tif_clientdata, module, "Not enough data at scanline %lu (short " TIFF_UINT64_FORMAT " bytes)", (unsigned long) tif->tif_row, (TIFF_UINT64_T) sp->stream.avail_out); return (0); } up = sp->tbuf; /* Swap bytes in the data if from a different endian machine. */ if (tif->tif_flags & TIFF_SWAB) TIFFSwabArrayOfShort(up, nsamples); /* * if llen is not an exact multiple of nsamples, the decode operation * may overflow the output buffer, so truncate it enough to prevent * that but still salvage as much data as possible. 
*/ if (nsamples % llen) { TIFFWarningExt(tif->tif_clientdata, module, "stride %lu is not a multiple of sample count, " "%lu, data truncated.", (unsigned long) llen, (unsigned long) nsamples); nsamples -= nsamples % llen; } for (i = 0; i < nsamples; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalAccumulateF(up, llen, sp->stride, (float *)op, sp->ToLinearF); op += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalAccumulate16(up, llen, sp->stride, (uint16 *)op, sp->ToLinear16); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_12BITPICIO: horizontalAccumulate12(up, llen, sp->stride, (int16 *)op, sp->ToLinearF); op += llen * sizeof(int16); break; case PIXARLOGDATAFMT_11BITLOG: horizontalAccumulate11(up, llen, sp->stride, (uint16 *)op); op += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalAccumulate8(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; case PIXARLOGDATAFMT_8BITABGR: horizontalAccumulate8abgr(up, llen, sp->stride, (unsigned char *)op, sp->ToLinear8); op += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "Unsupported bits/sample: %d", td->td_bitspersample); return (0); } } return (1); } static int PixarLogSetupEncode(TIFF* tif) { static const char module[] = "PixarLogSetupEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = EncoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle %d bit linear encodings", td->td_bitspersample); return (0); } if (deflateInit(&sp->stream, sp->quality) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } } /* * Reset encoding state at the start of a strip. */ static int PixarLogPreEncode(TIFF* tif, uint16 s) { static const char module[] = "PixarLogPreEncode"; PixarLogState *sp = EncoderState(tif); (void) s; assert(sp != NULL); sp->stream.next_out = tif->tif_rawdata; assert(sizeof(sp->stream.avail_out)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_out = (uInt)tif->tif_rawdatasize; if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } return (deflateReset(&sp->stream) == Z_OK); } static void horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2) { int32 r1, g1, b1, a1, r2, g2, b2, a2, mask; float fltsize = Fltsize; #define CLAMP(v) ( (v<(float)0.) ? 0 \ : (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \ : (v>(float)24.2) ? 
2047 \ : LogK1*log(v*LogK2) + 0.5 ) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = (uint16) CLAMP(ip[0]); g2 = wp[1] = (uint16) CLAMP(ip[1]); b2 = wp[2] = (uint16) CLAMP(ip[2]); a2 = wp[3] = (uint16) CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--) } } } static void horizontalDifference16(unsigned short *ip, int n, int stride, unsigned short *wp, uint16 *From14) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; /* assumption is unsigned pixel values */ #undef CLAMP #define CLAMP(v) From14[(v) >> 2] mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; wp += 3; ip += 3; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; wp += 4; ip += 4; r1 = CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1; } } else { ip += n - 1; /* point to last one */ wp += n - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } } static void horizontalDifference8(unsigned char *ip, int n, int stride, unsigned short *wp, uint16 *From8) { register int r1, g1, b1, a1, r2, g2, b2, a2, mask; #undef CLAMP #define CLAMP(v) (From8[(v)]) mask = CODE_MASK; if (n >= stride) { if (stride == 3) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); n -= 3; while (n > 0) { n -= 3; r1 = CLAMP(ip[3]); wp[3] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[4]); wp[4] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[5]); wp[5] = (uint16)((b1-b2) & mask); b2 = b1; wp += 3; ip += 3; } } else if (stride == 4) { r2 = wp[0] = CLAMP(ip[0]); g2 = wp[1] = CLAMP(ip[1]); b2 = wp[2] = CLAMP(ip[2]); a2 = wp[3] = CLAMP(ip[3]); n -= 4; while (n > 0) { n -= 4; r1 = CLAMP(ip[4]); wp[4] = (uint16)((r1-r2) & mask); r2 = r1; g1 = CLAMP(ip[5]); wp[5] = (uint16)((g1-g2) & mask); g2 = g1; b1 = CLAMP(ip[6]); wp[6] = (uint16)((b1-b2) & mask); b2 = b1; a1 = CLAMP(ip[7]); wp[7] = 
(uint16)((a1-a2) & mask); a2 = a1; wp += 4; ip += 4; } } else { wp += n + stride - 1; /* point to last one */ ip += n + stride - 1; /* point to last one */ n -= stride; while (n > 0) { REPEAT(stride, wp[0] = CLAMP(ip[0]); wp[stride] -= wp[0]; wp[stride] &= mask; wp--; ip--) n -= stride; } REPEAT(stride, wp[0] = CLAMP(ip[0]); wp--; ip--) } } } /* * Encode a chunk of pixels. */ static int PixarLogEncode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { static const char module[] = "PixarLogEncode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState *sp = EncoderState(tif); tmsize_t i; tmsize_t n; int llen; unsigned short * up; (void) s; switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: n = cc / sizeof(float); /* XXX float == 32 bits */ break; case PIXARLOGDATAFMT_16BIT: case PIXARLOGDATAFMT_12BITPICIO: case PIXARLOGDATAFMT_11BITLOG: n = cc / sizeof(uint16); /* XXX uint16 == 16 bits */ break; case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: n = cc; break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } llen = sp->stride * td->td_imagewidth; /* Check against the number of elements (of size uint16) of sp->tbuf */ if( n > (tmsize_t)(td->td_rowsperstrip * llen) ) { TIFFErrorExt(tif->tif_clientdata, module, "Too many input bytes provided"); return 0; } for (i = 0, up = sp->tbuf; i < n; i += llen, up += llen) { switch (sp->user_datafmt) { case PIXARLOGDATAFMT_FLOAT: horizontalDifferenceF((float *)bp, llen, sp->stride, up, sp->FromLT2); bp += llen * sizeof(float); break; case PIXARLOGDATAFMT_16BIT: horizontalDifference16((uint16 *)bp, llen, sp->stride, up, sp->From14); bp += llen * sizeof(uint16); break; case PIXARLOGDATAFMT_8BIT: horizontalDifference8((unsigned char *)bp, llen, sp->stride, up, sp->From8); bp += llen * sizeof(unsigned char); break; default: TIFFErrorExt(tif->tif_clientdata, module, "%d bit input not supported in PixarLog", td->td_bitspersample); return 0; } } sp->stream.next_in = (unsigned char *) sp->tbuf; assert(sizeof(sp->stream.avail_in)==4); /* if this assert gets raised, we need to simplify this code to reflect a ZLib that is likely updated to deal with 8byte memory sizes, though this code will respond appropriately even before we simplify it */ sp->stream.avail_in = (uInt) (n * sizeof(uint16)); if ((sp->stream.avail_in / sizeof(uint16)) != (uInt) n) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib cannot deal with buffers this size"); return (0); } do { if (deflate(&sp->stream, Z_NO_FLUSH) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "Encoder error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } if (sp->stream.avail_out == 0) { tif->tif_rawcc = tif->tif_rawdatasize; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } } while (sp->stream.avail_in > 0); return (1); } /* * Finish off an encoded strip by flushing the last * string and tacking on an End Of Information code. 
*/ static int PixarLogPostEncode(TIFF* tif) { static const char module[] = "PixarLogPostEncode"; PixarLogState *sp = EncoderState(tif); int state; sp->stream.avail_in = 0; do { state = deflate(&sp->stream, Z_FINISH); switch (state) { case Z_STREAM_END: case Z_OK: if ((tmsize_t)sp->stream.avail_out != tif->tif_rawdatasize) { tif->tif_rawcc = tif->tif_rawdatasize - sp->stream.avail_out; TIFFFlushData1(tif); sp->stream.next_out = tif->tif_rawdata; sp->stream.avail_out = (uInt) tif->tif_rawdatasize; /* this is a safe typecast, as check is made already in PixarLogPreEncode */ } break; default: TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } while (state != Z_STREAM_END); return (1); } static void PixarLogClose(TIFF* tif) { TIFFDirectory *td = &tif->tif_dir; /* In a really sneaky (and really incorrect, and untruthful, and * troublesome, and error-prone) maneuver that completely goes against * the spirit of TIFF, and breaks TIFF, on close, we covertly * modify both bitspersample and sampleformat in the directory to * indicate 8-bit linear. This way, the decode "just works" even for * readers that don't know about PixarLog, or how to set * the PIXARLOGDATFMT pseudo-tag. */ td->td_bitspersample = 8; td->td_sampleformat = SAMPLEFORMAT_UINT; } static void PixarLogCleanup(TIFF* tif) { PixarLogState* sp = (PixarLogState*) tif->tif_data; assert(sp != 0); (void)TIFFPredictorCleanup(tif); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; if (sp->FromLT2) _TIFFfree(sp->FromLT2); if (sp->From14) _TIFFfree(sp->From14); if (sp->From8) _TIFFfree(sp->From8); if (sp->ToLinearF) _TIFFfree(sp->ToLinearF); if (sp->ToLinear16) _TIFFfree(sp->ToLinear16); if (sp->ToLinear8) _TIFFfree(sp->ToLinear8); if (sp->state&PLSTATE_INIT) { if (tif->tif_mode == O_RDONLY) inflateEnd(&sp->stream); else deflateEnd(&sp->stream); } if (sp->tbuf) _TIFFfree(sp->tbuf); _TIFFfree(sp); tif->tif_data = NULL; _TIFFSetDefaultCompressionState(tif); } static int PixarLogVSetField(TIFF* tif, uint32 tag, va_list ap) { static const char module[] = "PixarLogVSetField"; PixarLogState *sp = (PixarLogState *)tif->tif_data; int result; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: sp->quality = (int) va_arg(ap, int); if (tif->tif_mode != O_RDONLY && (sp->state&PLSTATE_INIT)) { if (deflateParams(&sp->stream, sp->quality, Z_DEFAULT_STRATEGY) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "ZLib error: %s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } } return (1); case TIFFTAG_PIXARLOGDATAFMT: sp->user_datafmt = (int) va_arg(ap, int); /* Tweak the TIFF header so that the rest of libtiff knows what * size of data will be passed between app and library, and * assume that the app knows what it is doing and is not * confused by these header manipulations... 
*/ switch (sp->user_datafmt) { case PIXARLOGDATAFMT_8BIT: case PIXARLOGDATAFMT_8BITABGR: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_11BITLOG: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_12BITPICIO: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_INT); break; case PIXARLOGDATAFMT_16BIT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 16); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); break; case PIXARLOGDATAFMT_FLOAT: TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 32); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_IEEEFP); break; } /* * Must recalculate sizes should bits/sample change. */ tif->tif_tilesize = isTiled(tif) ? TIFFTileSize(tif) : (tmsize_t)(-1); tif->tif_scanlinesize = TIFFScanlineSize(tif); result = 1; /* NB: pseudo tag */ break; default: result = (*sp->vsetparent)(tif, tag, ap); } return (result); } static int PixarLogVGetField(TIFF* tif, uint32 tag, va_list ap) { PixarLogState *sp = (PixarLogState *)tif->tif_data; switch (tag) { case TIFFTAG_PIXARLOGQUALITY: *va_arg(ap, int*) = sp->quality; break; case TIFFTAG_PIXARLOGDATAFMT: *va_arg(ap, int*) = sp->user_datafmt; break; default: return (*sp->vgetparent)(tif, tag, ap); } return (1); } static const TIFFField pixarlogFields[] = { {TIFFTAG_PIXARLOGDATAFMT, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL}, {TIFFTAG_PIXARLOGQUALITY, 0, 0, TIFF_ANY, 0, TIFF_SETGET_INT, TIFF_SETGET_UNDEFINED, FIELD_PSEUDO, FALSE, FALSE, "", NULL} }; int TIFFInitPixarLog(TIFF* tif, int scheme) { static const char module[] = "TIFFInitPixarLog"; PixarLogState* sp; assert(scheme == COMPRESSION_PIXARLOG); /* * Merge codec-specific tag information. */ if (!_TIFFMergeFields(tif, pixarlogFields, TIFFArrayCount(pixarlogFields))) { TIFFErrorExt(tif->tif_clientdata, module, "Merging PixarLog codec-specific tags failed"); return 0; } /* * Allocate state block so tag methods have storage to record values. */ tif->tif_data = (uint8*) _TIFFmalloc(sizeof (PixarLogState)); if (tif->tif_data == NULL) goto bad; sp = (PixarLogState*) tif->tif_data; _TIFFmemset(sp, 0, sizeof (*sp)); sp->stream.data_type = Z_BINARY; sp->user_datafmt = PIXARLOGDATAFMT_UNKNOWN; /* * Install codec methods. */ tif->tif_fixuptags = PixarLogFixupTags; tif->tif_setupdecode = PixarLogSetupDecode; tif->tif_predecode = PixarLogPreDecode; tif->tif_decoderow = PixarLogDecode; tif->tif_decodestrip = PixarLogDecode; tif->tif_decodetile = PixarLogDecode; tif->tif_setupencode = PixarLogSetupEncode; tif->tif_preencode = PixarLogPreEncode; tif->tif_postencode = PixarLogPostEncode; tif->tif_encoderow = PixarLogEncode; tif->tif_encodestrip = PixarLogEncode; tif->tif_encodetile = PixarLogEncode; tif->tif_close = PixarLogClose; tif->tif_cleanup = PixarLogCleanup; /* Override SetField so we can handle our private pseudo-tag */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PixarLogVGetField; /* hook for codec tags */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PixarLogVSetField; /* hook for codec tags */ /* Default values for codec-specific fields */ sp->quality = Z_DEFAULT_COMPRESSION; /* default comp. 
level */
	sp->state = 0;

	/* we don't wish to use the predictor:
	 * the default is none, i.e. predictor value 1
	 */
	(void) TIFFPredictorInit(tif);

	/*
	 * build the companding tables
	 */
	PixarLogMakeTables(sp);

	return (1);

bad:
	TIFFErrorExt(tif->tif_clientdata, module,
		     "No space for PixarLog state block");
	return (0);
}
#endif /* PIXARLOG_SUPPORT */

/* vim: set ts=8 sts=8 sw=8 noet: */
/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 8
 * fill-column: 78
 * End:
 */
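/*
 * A corpus-side sketch, not part of libtiff: the horizontalDifference*
 * routines above assume `n` covers at least one whole pixel of `stride`
 * samples and step `ip`/`wp` by `stride` while counting `n` down, so a
 * row length that does not match what the caller promised walks past
 * the buffer -- the CWE-119 class this record is filed under.  The
 * helper below shows the same masked delta predictor with an explicit
 * whole-pixel bound; the name `safe_horizontal_diff` is hypothetical,
 * it needs no headers beyond the C language itself, and any trailing
 * samples of a partial pixel are deliberately left untouched.
 */
static void
safe_horizontal_diff(const unsigned short *ip, int n, int stride,
                     unsigned short *wp, unsigned short mask)
{
	int i, c;

	if (stride <= 0 || n < stride)      /* not even one whole pixel */
		return;
	for (c = 0; c < stride; c++)        /* first pixel is stored verbatim */
		wp[c] = ip[c];
	/* remaining samples: difference against the previous pixel, masked;
	 * the loop bound guarantees every access stays inside ip[0..n-1] */
	for (i = stride; i + stride <= n; i += stride)
		for (c = 0; c < stride; c++)
			wp[i + c] = (unsigned short)
			    ((ip[i + c] - ip[i + c - stride]) & mask);
}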
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5475_1
crossvul-cpp_data_bad_5575_0
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */ /* * Copyright 1990,1991,2001,2007,2008,2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "k5-int.h" #include <syslog.h> #ifdef HAVE_NETINET_IN_H #include <sys/types.h> #include <netinet/in.h> #ifndef hpux #include <arpa/inet.h> #endif #endif #include "kdc_util.h" #include "policy.h" #include "extern.h" #include "adm_proto.h" #include <ctype.h> static krb5_error_code find_alternate_tgs(krb5_kdc_req *,krb5_db_entry **); static krb5_error_code prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int, krb5_principal,krb5_data **,const char *, krb5_pa_data **); static krb5_int32 prep_reprocess_req(krb5_kdc_req *,krb5_principal *); /*ARGSUSED*/ krb5_error_code process_tgs_req(krb5_data *pkt, const krb5_fulladdr *from, krb5_data **response) { krb5_keyblock * subkey = 0; krb5_keyblock * tgskey = 0; krb5_kdc_req *request = 0; krb5_db_entry *server = NULL; krb5_kdc_rep reply; krb5_enc_kdc_rep_part reply_encpart; krb5_ticket ticket_reply, *header_ticket = 0; int st_idx = 0; krb5_enc_tkt_part enc_tkt_reply; krb5_transited enc_tkt_transited; int newtransited = 0; krb5_error_code retval = 0; krb5_keyblock encrypting_key; krb5_timestamp kdc_time, authtime = 0; krb5_keyblock session_key; krb5_timestamp rtime; krb5_keyblock *reply_key = NULL; krb5_key_data *server_key; char *cname = 0, *sname = 0, *altcname = 0; krb5_last_req_entry *nolrarray[2], nolrentry; krb5_enctype useenctype; int errcode, errcode2; register int i; int firstpass = 1; const char *status = 0; krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */ krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */ krb5_db_entry *client = NULL, *krbtgt = NULL; krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */ krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */ unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */ char *s4u_name = NULL; krb5_boolean is_referral, db_ref_done = FALSE; const char *emsg = NULL; krb5_data *tgs_1 =NULL, *server_1 = NULL; krb5_principal krbtgt_princ; krb5_kvno ticket_kvno = 0; struct kdc_request_state *state = NULL; krb5_pa_data *pa_tgs_req; /*points into request*/ krb5_data scratch; krb5_pa_data **e_data = NULL; reply.padata = 0; /* For cleanup handler */ reply_encpart.enc_padata = 0; enc_tkt_reply.authorization_data = NULL; session_key.contents = NULL; retval = decode_krb5_tgs_req(pkt, &request); if (retval) return retval; if (request->msg_type != KRB5_TGS_REQ) { krb5_free_kdc_req(kdc_context, request); return KRB5_BADMSGTYPE; } /* * setup_server_realm() sets up the global realm-specific data pointer. */ if ((retval = setup_server_realm(request->server))) { krb5_free_kdc_req(kdc_context, request); return retval; } errcode = kdc_process_tgs_req(request, from, pkt, &header_ticket, &krbtgt, &tgskey, &subkey, &pa_tgs_req); if (header_ticket && header_ticket->enc_part2 && (errcode2 = krb5_unparse_name(kdc_context, header_ticket->enc_part2->client, &cname))) { status = "UNPARSING CLIENT"; errcode = errcode2; goto cleanup; } limit_string(cname); if (errcode) { status = "PROCESS_TGS"; goto cleanup; } if (!header_ticket) { errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? 
*/ status="UNEXPECTED NULL in header_ticket"; goto cleanup; } errcode = kdc_make_rstate(&state); if (errcode !=0) { status = "making state"; goto cleanup; } scratch.length = pa_tgs_req->length; scratch.data = (char *) pa_tgs_req->contents; errcode = kdc_find_fast(&request, &scratch, subkey, header_ticket->enc_part2->session, state, NULL); if (errcode !=0) { status = "kdc_find_fast"; goto cleanup; } /* * Pointer to the encrypted part of the header ticket, which may be * replaced to point to the encrypted part of the evidence ticket * if constrained delegation is used. This simplifies the number of * special cases for constrained delegation. */ header_enc_tkt = header_ticket->enc_part2; /* * We've already dealt with the AP_REQ authentication, so we can * use header_ticket freely. The encrypted part (if any) has been * decrypted with the session key. */ /* XXX make sure server here has the proper realm...taken from AP_REQ header? */ setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK); if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) { setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE); setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE); } db_ref_done = FALSE; ref_tgt_again: if ((errcode = krb5_unparse_name(kdc_context, request->server, &sname))) { status = "UNPARSING SERVER"; goto cleanup; } limit_string(sname); errcode = krb5_db_get_principal(kdc_context, request->server, s_flags, &server); if (errcode && errcode != KRB5_KDB_NOENTRY) { status = "LOOKING_UP_SERVER"; goto cleanup; } tgt_again: if (errcode == KRB5_KDB_NOENTRY) { /* * might be a request for a TGT for some other realm; we * should do our best to find such a TGS in this db */ if (firstpass ) { if ( krb5_is_tgs_principal(request->server) == TRUE) { /* Principal is a name of krb ticket service */ if (krb5_princ_size(kdc_context, request->server) == 2) { server_1 = krb5_princ_component(kdc_context, request->server, 1); tgs_1 = krb5_princ_component(kdc_context, tgs_server, 1); if (!tgs_1 || !data_eq(*server_1, *tgs_1)) { errcode = find_alternate_tgs(request, &server); firstpass = 0; if (errcode == 0) goto tgt_again; } } status = "UNKNOWN_SERVER"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto cleanup; } else if ( db_ref_done == FALSE) { retval = prep_reprocess_req(request, &krbtgt_princ); if (!retval) { krb5_free_principal(kdc_context, request->server); retval = krb5_copy_principal(kdc_context, krbtgt_princ, &(request->server)); if (!retval) { db_ref_done = TRUE; if (sname != NULL) free(sname); goto ref_tgt_again; } } } } status = "UNKNOWN_SERVER"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto cleanup; } if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) { status = "TIME_OF_DAY"; goto cleanup; } if ((retval = validate_tgs_request(request, *server, header_ticket, kdc_time, &status, &e_data))) { if (!status) status = "UNKNOWN_REASON"; errcode = retval + ERROR_TABLE_BASE_krb5; goto cleanup; } if (!is_local_principal(header_enc_tkt->client)) setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM); is_referral = krb5_is_tgs_principal(server->princ) && !krb5_principal_compare(kdc_context, tgs_server, server->princ); /* Check for protocol transition */ errcode = kdc_process_s4u2self_req(kdc_context, request, header_enc_tkt->client, server, subkey, header_enc_tkt->session, kdc_time, &s4u_x509_user, &client, &status); if (errcode) goto cleanup; if (s4u_x509_user != NULL) setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION); /* * We pick the session keytype here.... 
* * Some special care needs to be taken in the user-to-user * case, since we don't know what keytypes the application server * which is doing user-to-user authentication can support. We * know that it at least must be able to support the encryption * type of the session key in the TGT, since otherwise it won't be * able to decrypt the U2U ticket! So we use that in preference * to anything else. */ useenctype = 0; if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY | KDC_OPT_CNAME_IN_ADDL_TKT)) { krb5_keyblock * st_sealing_key; krb5_kvno st_srv_kvno; krb5_enctype etype; krb5_db_entry *st_client; /* * Get the key for the second ticket, and decrypt it. */ if ((errcode = kdc_get_server_key(request->second_ticket[st_idx], c_flags, TRUE, /* match_enctype */ &st_client, &st_sealing_key, &st_srv_kvno))) { status = "2ND_TKT_SERVER"; goto cleanup; } errcode = krb5_decrypt_tkt_part(kdc_context, st_sealing_key, request->second_ticket[st_idx]); krb5_free_keyblock(kdc_context, st_sealing_key); if (errcode) { status = "2ND_TKT_DECRYPT"; krb5_db_free_principal(kdc_context, st_client); goto cleanup; } etype = request->second_ticket[st_idx]->enc_part2->session->enctype; if (!krb5_c_valid_enctype(etype)) { status = "BAD_ETYPE_IN_2ND_TKT"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; krb5_db_free_principal(kdc_context, st_client); goto cleanup; } for (i = 0; i < request->nktypes; i++) { if (request->ktype[i] == etype) { useenctype = etype; break; } } if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) { /* Do constrained delegation protocol and authorization checks */ errcode = kdc_process_s4u2proxy_req(kdc_context, request, request->second_ticket[st_idx]->enc_part2, st_client, header_ticket->enc_part2->client, request->server, &status); if (errcode) goto cleanup; setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION); assert(krb5_is_tgs_principal(header_ticket->server)); assert(client == NULL); /* assured by kdc_process_s4u2self_req() */ client = st_client; } else { /* "client" is not used for user2user */ krb5_db_free_principal(kdc_context, st_client); } } /* * Select the keytype for the ticket session key. */ if ((useenctype == 0) && (useenctype = select_session_keytype(kdc_context, server, request->nktypes, request->ktype)) == 0) { /* unsupported ktype */ status = "BAD_ENCRYPTION_TYPE"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; goto cleanup; } errcode = krb5_c_make_random_key(kdc_context, useenctype, &session_key); if (errcode) { /* random key failed */ status = "RANDOM_KEY_FAILED"; goto cleanup; } /* * subject_tkt will refer to the evidence ticket (for constrained * delegation) or the TGT. The distinction from header_enc_tkt is * necessary because the TGS signature only protects some fields: * the others could be forged by a malicious server. */ if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) subject_tkt = request->second_ticket[st_idx]->enc_part2; else subject_tkt = header_enc_tkt; authtime = subject_tkt->times.authtime; if (is_referral) ticket_reply.server = server->princ; else ticket_reply.server = request->server; /* XXX careful for realm... */ enc_tkt_reply.flags = 0; enc_tkt_reply.times.starttime = 0; if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE)) setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE); /* * Fix header_ticket's starttime; if it's zero, fill in the * authtime's value. 
*/ if (!(header_enc_tkt->times.starttime)) header_enc_tkt->times.starttime = authtime; setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP); /* don't use new addresses unless forwarded, see below */ enc_tkt_reply.caddrs = header_enc_tkt->caddrs; /* noaddrarray[0] = 0; */ reply_encpart.caddrs = 0;/* optional...don't put it in */ reply_encpart.enc_padata = NULL; /* * It should be noted that local policy may affect the * processing of any of these flags. For example, some * realms may refuse to issue renewable tickets */ if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) { setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { /* * If S4U2Self principal is not forwardable, then mark ticket as * unforwardable. This behaviour matches Windows, but it is * different to the MIT AS-REQ path, which returns an error * (KDC_ERR_POLICY) if forwardable tickets cannot be issued. * * Consider this block the S4U2Self equivalent to * validate_forwardable(). */ if (client != NULL && isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * Forwardable flag is propagated along referral path. */ else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * OK_TO_AUTH_AS_DELEGATE must be set on the service requesting * S4U2Self in order for forwardable tickets to be returned. */ else if (!is_referral && !isflagset(server->attributes, KRB5_KDB_OK_TO_AUTH_AS_DELEGATE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); } } if (isflagset(request->kdc_options, KDC_OPT_FORWARDED)) { setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDED); /* include new addresses in ticket & reply */ enc_tkt_reply.caddrs = request->addresses; reply_encpart.caddrs = request->addresses; } if (isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDED)) setflag(enc_tkt_reply.flags, TKT_FLG_FORWARDED); if (isflagset(request->kdc_options, KDC_OPT_PROXIABLE)) setflag(enc_tkt_reply.flags, TKT_FLG_PROXIABLE); if (isflagset(request->kdc_options, KDC_OPT_PROXY)) { setflag(enc_tkt_reply.flags, TKT_FLG_PROXY); /* include new addresses in ticket & reply */ enc_tkt_reply.caddrs = request->addresses; reply_encpart.caddrs = request->addresses; } if (isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE)) setflag(enc_tkt_reply.flags, TKT_FLG_MAY_POSTDATE); if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) { setflag(enc_tkt_reply.flags, TKT_FLG_POSTDATED); setflag(enc_tkt_reply.flags, TKT_FLG_INVALID); enc_tkt_reply.times.starttime = request->from; } else enc_tkt_reply.times.starttime = kdc_time; if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; clear(enc_tkt_reply.flags, TKT_FLG_INVALID); } if (isflagset(request->kdc_options, KDC_OPT_RENEW)) { krb5_deltat old_life; assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; old_life = enc_tkt_reply.times.endtime - enc_tkt_reply.times.starttime; enc_tkt_reply.times.starttime = kdc_time; enc_tkt_reply.times.endtime = min(header_ticket->enc_part2->times.renew_till, kdc_time + 
old_life); } else { /* not a renew request */ enc_tkt_reply.times.starttime = kdc_time; kdc_get_ticket_endtime(kdc_context, enc_tkt_reply.times.starttime, header_enc_tkt->times.endtime, request->till, client, server, &enc_tkt_reply.times.endtime); if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) && (enc_tkt_reply.times.endtime < request->till) && isflagset(header_enc_tkt->flags, TKT_FLG_RENEWABLE)) { setflag(request->kdc_options, KDC_OPT_RENEWABLE); request->rtime = min(request->till, header_enc_tkt->times.renew_till); } } rtime = (request->rtime == 0) ? kdc_infinity : request->rtime; if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) { /* already checked above in policy check to reject request for a renewable ticket using a non-renewable ticket */ setflag(enc_tkt_reply.flags, TKT_FLG_RENEWABLE); enc_tkt_reply.times.renew_till = min(rtime, min(header_enc_tkt->times.renew_till, enc_tkt_reply.times.starttime + min(server->max_renewable_life, max_renewable_life_for_realm))); } else { enc_tkt_reply.times.renew_till = 0; } if (isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS)) setflag(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS); /* * Set authtime to be the same as header or evidence ticket's */ enc_tkt_reply.times.authtime = authtime; /* * Propagate the preauthentication flags through to the returned ticket. */ if (isflagset(header_enc_tkt->flags, TKT_FLG_PRE_AUTH)) setflag(enc_tkt_reply.flags, TKT_FLG_PRE_AUTH); if (isflagset(header_enc_tkt->flags, TKT_FLG_HW_AUTH)) setflag(enc_tkt_reply.flags, TKT_FLG_HW_AUTH); /* starttime is optional, and treated as authtime if not present. so we can nuke it if it matches */ if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime) enc_tkt_reply.times.starttime = 0; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { errcode = krb5_unparse_name(kdc_context, s4u_x509_user->user_id.user, &s4u_name); } else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { errcode = krb5_unparse_name(kdc_context, subject_tkt->client, &s4u_name); } else { errcode = 0; } if (errcode) { status = "UNPARSING S4U CLIENT"; goto cleanup; } if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; encrypting_key = *(t2enc->session); } else { /* * Find the server key */ if ((errcode = krb5_dbe_find_enctype(kdc_context, server, -1, /* ignore keytype */ -1, /* Ignore salttype */ 0, /* Get highest kvno */ &server_key))) { status = "FINDING_SERVER_KEY"; goto cleanup; } /* * Convert server.key into a real key * (it may be encrypted in the database) */ if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL, server_key, &encrypting_key, NULL))) { status = "DECRYPT_SERVER_KEY"; goto cleanup; } } if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { /* * Don't allow authorization data to be disabled if constrained * delegation is requested. We don't want to deny the server * the ability to validate that delegation was used. */ clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED); } if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) { /* * If we are not doing protocol transition/constrained delegation * try to lookup the client principal so plugins can add additional * authorization information. * * Always validate authorization data for constrained delegation * because we must validate the KDC signatures. 
*/ if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) { /* Generate authorization data so we can include it in ticket */ setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC); /* Map principals from foreign (possibly non-AD) realms */ setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS); assert(client == NULL); /* should not have been set already */ errcode = krb5_db_get_principal(kdc_context, subject_tkt->client, c_flags, &client); } } if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && !isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) enc_tkt_reply.client = s4u_x509_user->user_id.user; else enc_tkt_reply.client = subject_tkt->client; enc_tkt_reply.session = &session_key; enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */ errcode = handle_authdata(kdc_context, c_flags, client, server, krbtgt, subkey != NULL ? subkey : header_ticket->enc_part2->session, &encrypting_key, /* U2U or server key */ tgskey, pkt, request, s4u_x509_user ? s4u_x509_user->user_id.user : NULL, subject_tkt, &enc_tkt_reply); if (errcode) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"), errcode); status = "HANDLE_AUTHDATA"; goto cleanup; } /* * Only add the realm of the presented tgt to the transited list if * it is different than the local realm (cross-realm) and it is different * than the realm of the client (since the realm of the client is already * implicitly part of the transited list and should not be explicitly * listed). */ /* realm compare is like strcmp, but knows how to deal with these args */ if (realm_compare(header_ticket->server, tgs_server) || realm_compare(header_ticket->server, enc_tkt_reply.client)) { /* tgt issued by local realm or issued by realm of client */ enc_tkt_reply.transited = header_enc_tkt->transited; } else { /* tgt issued by some other realm and not the realm of the client */ /* assemble new transited field into allocated storage */ if (header_enc_tkt->transited.tr_type != KRB5_DOMAIN_X500_COMPRESS) { status = "BAD_TRTYPE"; errcode = KRB5KDC_ERR_TRTYPE_NOSUPP; goto cleanup; } enc_tkt_transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; enc_tkt_transited.magic = 0; enc_tkt_transited.tr_contents.magic = 0; enc_tkt_transited.tr_contents.data = 0; enc_tkt_transited.tr_contents.length = 0; enc_tkt_reply.transited = enc_tkt_transited; if ((errcode = add_to_transited(&header_enc_tkt->transited.tr_contents, &enc_tkt_reply.transited.tr_contents, header_ticket->server, enc_tkt_reply.client, request->server))) { status = "ADD_TR_FAIL"; goto cleanup; } newtransited = 1; } if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) { errcode = validate_transit_path(kdc_context, header_enc_tkt->client, server, krbtgt); if (errcode) { status = "NON_TRANSITIVE"; goto cleanup; } } if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) { unsigned int tlen; char *tdots; errcode = kdc_check_transited_list (kdc_context, &enc_tkt_reply.transited.tr_contents, krb5_princ_realm (kdc_context, header_enc_tkt->client), krb5_princ_realm (kdc_context, request->server)); tlen = enc_tkt_reply.transited.tr_contents.length; tdots = tlen > 125 ? "..." : ""; tlen = tlen > 125 ? 125 : tlen; if (errcode == 0) { setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED); } else if (errcode == KRB5KRB_AP_ERR_ILL_CR_TKT) krb5_klog_syslog(LOG_INFO, _("bad realm transit path from '%s' " "to '%s' via '%.*s%s'"), cname ? cname : "<unknown client>", sname ? 
sname : "<unknown server>", tlen, enc_tkt_reply.transited.tr_contents.data, tdots); else { emsg = krb5_get_error_message(kdc_context, errcode); krb5_klog_syslog(LOG_ERR, _("unexpected error checking transit " "from '%s' to '%s' via '%.*s%s': %s"), cname ? cname : "<unknown client>", sname ? sname : "<unknown server>", tlen, enc_tkt_reply.transited.tr_contents.data, tdots, emsg); krb5_free_error_message(kdc_context, emsg); emsg = NULL; } } else krb5_klog_syslog(LOG_INFO, _("not checking transit path")); if (reject_bad_transit && !isflagset (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) { errcode = KRB5KDC_ERR_POLICY; status = "BAD_TRANSIT"; goto cleanup; } ticket_reply.enc_part2 = &enc_tkt_reply; /* * If we are doing user-to-user authentication, then make sure * that the client for the second ticket matches the request * server, and then encrypt the ticket using the session key of * the second ticket. */ if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { /* * Make sure the client for the second ticket matches * requested server. */ krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; krb5_principal client2 = t2enc->client; if (!krb5_principal_compare(kdc_context, request->server, client2)) { if ((errcode = krb5_unparse_name(kdc_context, client2, &altcname))) altcname = 0; if (altcname != NULL) limit_string(altcname); errcode = KRB5KDC_ERR_SERVER_NOMATCH; status = "2ND_TKT_MISMATCH"; goto cleanup; } ticket_kvno = 0; ticket_reply.enc_part.enctype = t2enc->session->enctype; st_idx++; } else { ticket_kvno = server_key->key_data_kvno; } errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key, &ticket_reply); if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) krb5_free_keyblock_contents(kdc_context, &encrypting_key); if (errcode) { status = "TKT_ENCRYPT"; goto cleanup; } ticket_reply.enc_part.kvno = ticket_kvno; /* Start assembling the response */ reply.msg_type = KRB5_TGS_REP; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && find_pa_data(request->padata, KRB5_PADATA_S4U_X509_USER) != NULL) { errcode = kdc_make_s4u2self_rep(kdc_context, subkey, header_ticket->enc_part2->session, s4u_x509_user, &reply, &reply_encpart); if (errcode) { status = "KDC_RETURN_S4U2SELF_PADATA"; goto cleanup; } } reply.client = enc_tkt_reply.client; reply.enc_part.kvno = 0;/* We are using the session key */ reply.ticket = &ticket_reply; reply_encpart.session = &session_key; reply_encpart.nonce = request->nonce; /* copy the time fields */ reply_encpart.times = enc_tkt_reply.times; /* starttime is optional, and treated as authtime if not present. so we can nuke it if it matches */ if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime) enc_tkt_reply.times.starttime = 0; nolrentry.lr_type = KRB5_LRQ_NONE; nolrentry.value = 0; nolrarray[0] = &nolrentry; nolrarray[1] = 0; reply_encpart.last_req = nolrarray; /* not available for TGS reqs */ reply_encpart.key_exp = 0;/* ditto */ reply_encpart.flags = enc_tkt_reply.flags; reply_encpart.server = ticket_reply.server; /* use the session key in the ticket, unless there's a subsession key in the AP_REQ */ reply.enc_part.enctype = subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype; errcode = kdc_fast_response_handle_padata(state, request, &reply, subkey ? 
subkey->enctype : header_ticket->enc_part2->session->enctype); if (errcode !=0 ) { status = "Preparing FAST padata"; goto cleanup; } errcode =kdc_fast_handle_reply_key(state, subkey?subkey:header_ticket->enc_part2->session, &reply_key); if (errcode) { status = "generating reply key"; goto cleanup; } errcode = return_enc_padata(kdc_context, pkt, request, reply_key, server, &reply_encpart, is_referral && isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE)); if (errcode) { status = "KDC_RETURN_ENC_PADATA"; goto cleanup; } errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart, subkey ? 1 : 0, reply_key, &reply, response); if (errcode) { status = "ENCODE_KDC_REP"; } else { status = "ISSUE"; } memset(ticket_reply.enc_part.ciphertext.data, 0, ticket_reply.enc_part.ciphertext.length); free(ticket_reply.enc_part.ciphertext.data); /* these parts are left on as a courtesy from krb5_encode_kdc_rep so we can use them in raw form if needed. But, we don't... */ memset(reply.enc_part.ciphertext.data, 0, reply.enc_part.ciphertext.length); free(reply.enc_part.ciphertext.data); cleanup: assert(status != NULL); if (reply_key) krb5_free_keyblock(kdc_context, reply_key); if (errcode) emsg = krb5_get_error_message (kdc_context, errcode); log_tgs_req(from, request, &reply, cname, sname, altcname, authtime, c_flags, s4u_name, status, errcode, emsg); if (errcode) { krb5_free_error_message (kdc_context, emsg); emsg = NULL; } if (errcode) { int got_err = 0; if (status == 0) { status = krb5_get_error_message (kdc_context, errcode); got_err = 1; } errcode -= ERROR_TABLE_BASE_krb5; if (errcode < 0 || errcode > 128) errcode = KRB_ERR_GENERIC; retval = prepare_error_tgs(state, request, header_ticket, errcode, (server != NULL) ? server->princ : NULL, response, status, e_data); if (got_err) { krb5_free_error_message (kdc_context, status); status = 0; } } if (header_ticket != NULL) krb5_free_ticket(kdc_context, header_ticket); if (request != NULL) krb5_free_kdc_req(kdc_context, request); if (state) kdc_free_rstate(state); if (cname != NULL) free(cname); if (sname != NULL) free(sname); krb5_db_free_principal(kdc_context, server); krb5_db_free_principal(kdc_context, krbtgt); krb5_db_free_principal(kdc_context, client); if (session_key.contents != NULL) krb5_free_keyblock_contents(kdc_context, &session_key); if (newtransited) free(enc_tkt_reply.transited.tr_contents.data); if (s4u_x509_user != NULL) krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user); if (kdc_issued_auth_data != NULL) krb5_free_authdata(kdc_context, kdc_issued_auth_data); if (s4u_name != NULL) free(s4u_name); if (subkey != NULL) krb5_free_keyblock(kdc_context, subkey); if (tgskey != NULL) krb5_free_keyblock(kdc_context, tgskey); if (reply.padata) krb5_free_pa_data(kdc_context, reply.padata); if (reply_encpart.enc_padata) krb5_free_pa_data(kdc_context, reply_encpart.enc_padata); if (enc_tkt_reply.authorization_data != NULL) krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data); krb5_free_pa_data(kdc_context, e_data); return retval; } static krb5_error_code prepare_error_tgs (struct kdc_request_state *state, krb5_kdc_req *request, krb5_ticket *ticket, int error, krb5_principal canon_server, krb5_data **response, const char *status, krb5_pa_data **e_data) { krb5_error errpkt; krb5_error_code retval = 0; krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL; errpkt.ctime = request->nonce; errpkt.cusec = 0; if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec))) return(retval); errpkt.error = error; 
errpkt.server = request->server; if (ticket && ticket->enc_part2) errpkt.client = ticket->enc_part2->client; else errpkt.client = NULL; errpkt.text.length = strlen(status); if (!(errpkt.text.data = strdup(status))) return ENOMEM; if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) { free(errpkt.text.data); return ENOMEM; } if (e_data != NULL) { retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) { free(scratch); free(errpkt.text.data); return retval; } errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); if (state) { retval = kdc_fast_handle_error(kdc_context, state, request, e_data, &errpkt, &fast_edata); } if (retval) { free(scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); return retval; } if (fast_edata) errpkt.e_data = *fast_edata; retval = krb5_mk_error(kdc_context, &errpkt, scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); krb5_free_data(kdc_context, fast_edata); if (retval) free(scratch); else *response = scratch; return retval; } /* * The request seems to be for a ticket-granting service somewhere else, * but we don't have a ticket for the final TGS. Try to give the requestor * some intermediate realm. */ static krb5_error_code find_alternate_tgs(krb5_kdc_req *request, krb5_db_entry **server_ptr) { krb5_error_code retval; krb5_principal *plist = NULL, *pl2, tmpprinc; krb5_data tmp; krb5_db_entry *server = NULL; *server_ptr = NULL; /* * Call to krb5_princ_component is normally not safe but is so * here only because find_alternate_tgs() is only called from * somewhere that has already checked the number of components in * the principal. */ if ((retval = krb5_walk_realm_tree(kdc_context, krb5_princ_realm(kdc_context, request->server), krb5_princ_component(kdc_context, request->server, 1), &plist, KRB5_REALM_BRANCH_CHAR))) return retval; /* move to the end */ for (pl2 = plist; *pl2; pl2++); /* the first entry in this array is for krbtgt/local@local, so we ignore it */ while (--pl2 > plist) { tmp = *krb5_princ_realm(kdc_context, *pl2); krb5_princ_set_realm(kdc_context, *pl2, krb5_princ_realm(kdc_context, tgs_server)); retval = krb5_db_get_principal(kdc_context, *pl2, 0, &server); krb5_princ_set_realm(kdc_context, *pl2, &tmp); if (retval == KRB5_KDB_NOENTRY) continue; else if (retval) goto cleanup; /* Found it. */ tmp = *krb5_princ_realm(kdc_context, *pl2); krb5_princ_set_realm(kdc_context, *pl2, krb5_princ_realm(kdc_context, tgs_server)); retval = krb5_copy_principal(kdc_context, *pl2, &tmpprinc); if (retval) goto cleanup; krb5_princ_set_realm(kdc_context, *pl2, &tmp); krb5_free_principal(kdc_context, request->server); request->server = tmpprinc; log_tgs_alt_tgt(request->server); *server_ptr = server; server = NULL; goto cleanup; } retval = KRB5_KDB_NOENTRY; cleanup: krb5_free_realm_tree(kdc_context, plist); krb5_db_free_principal(kdc_context, server); return retval; } static krb5_int32 prep_reprocess_req(krb5_kdc_req *request, krb5_principal *krbtgt_princ) { krb5_error_code retval = KRB5KRB_AP_ERR_BADMATCH; char **realms, **cpp, *temp_buf=NULL; krb5_data *comp1 = NULL, *comp2 = NULL; char *comp1_str = NULL; /* By now we know that server principal name is unknown. * If CANONICALIZE flag is set in the request * If req is not U2U authn. req * the requested server princ. 
has exactly two components * either * the name type is NT-SRV-HST * or name type is NT-UNKNOWN and * the 1st component is listed in conf file under host_based_services * the 1st component is not in a list in conf under "no_host_referral" * the 2d component looks like fully-qualified domain name (FQDN) * If all of these conditions are satisfied - try mapping the FQDN and * re-process the request as if client had asked for cross-realm TGT. */ if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE) && !isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY) && krb5_princ_size(kdc_context, request->server) == 2) { comp1 = krb5_princ_component(kdc_context, request->server, 0); comp2 = krb5_princ_component(kdc_context, request->server, 1); comp1_str = calloc(1,comp1->length+1); if (!comp1_str) { retval = ENOMEM; goto cleanup; } strlcpy(comp1_str,comp1->data,comp1->length+1); if ((krb5_princ_type(kdc_context, request->server) == KRB5_NT_SRV_HST || krb5_princ_type(kdc_context, request->server) == KRB5_NT_SRV_INST || (krb5_princ_type(kdc_context, request->server) == KRB5_NT_UNKNOWN && kdc_active_realm->realm_host_based_services != NULL && (krb5_match_config_pattern(kdc_active_realm->realm_host_based_services, comp1_str) == TRUE || krb5_match_config_pattern(kdc_active_realm->realm_host_based_services, KRB5_CONF_ASTERISK) == TRUE))) && (kdc_active_realm->realm_no_host_referral == NULL || (krb5_match_config_pattern(kdc_active_realm->realm_no_host_referral, KRB5_CONF_ASTERISK) == FALSE && krb5_match_config_pattern(kdc_active_realm->realm_no_host_referral, comp1_str) == FALSE))) { if (memchr(comp2->data, '.', comp2->length) == NULL) goto cleanup; temp_buf = calloc(1, comp2->length+1); if (!temp_buf) { retval = ENOMEM; goto cleanup; } strlcpy(temp_buf, comp2->data,comp2->length+1); retval = krb5int_get_domain_realm_mapping(kdc_context, temp_buf, &realms); free(temp_buf); if (retval) { /* no match found */ kdc_err(kdc_context, retval, "unable to find realm of host"); goto cleanup; } if (realms == 0) { retval = KRB5KRB_AP_ERR_BADMATCH; goto cleanup; } /* Don't return a referral to the null realm or the service * realm. */ if (realms[0] == 0 || data_eq_string(request->server->realm, realms[0])) { free(realms[0]); free(realms); retval = KRB5KRB_AP_ERR_BADMATCH; goto cleanup; } /* Modify request. * Construct cross-realm tgt : krbtgt/REMOTE_REALM@LOCAL_REALM * and use it as a principal in this req. */ retval = krb5_build_principal(kdc_context, krbtgt_princ, (*request->server).realm.length, (*request->server).realm.data, "krbtgt", realms[0], (char *)0); for (cpp = realms; *cpp; cpp++) free(*cpp); } } cleanup: free(comp1_str); return retval; }
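/*
 * A corpus-side sketch, not part of MIT krb5: prep_reprocess_req()
 * above must turn non-NUL-terminated krb5_data components into C
 * strings before pattern matching, and it does so with calloc(length+1)
 * plus strlcpy.  Dropping the +1, or reaching for strcpy on data that
 * carries no terminator, is the CWE-119 overflow class this record is
 * sorted under; a memcpy-based variant also avoids reading past the
 * source the way a strlen-style scan can.  The helper below (assuming
 * <stdlib.h>, <string.h> and the krb5_data type from k5-int.h)
 * isolates that idiom; the name data_to_cstring does not exist in krb5.
 */
static char *
data_to_cstring(const krb5_data *d)
{
    char *s = calloc(1, d->length + 1); /* one extra byte for the NUL */

    if (s != NULL)
        memcpy(s, d->data, d->length);  /* length-bounded copy, no strcpy */
    return s;                           /* caller frees */
}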
./CrossVul/dataset_final_sorted/CWE-119/c/bad_5575_0
crossvul-cpp_data_good_1_0
/* Copyright (c) 2015, Cisco Systems All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <string.h> #include <memory.h> #include <assert.h> #include "global.h" #include "snr.h" #include "getvlc.h" #include "read_bits.h" #include "transform.h" #include "common_block.h" #include "inter_prediction.h" #include "intra_prediction.h" #include "simd.h" #include "wt_matrix.h" extern int chroma_qp[52]; static void decode_and_reconstruct_block_intra (SAMPLE *rec, int stride, int size, int qp, SAMPLE *pblock, int16_t *coeffq, int tb_split, int upright_available,int downleft_available, intra_mode_t intra_mode,int ypos,int xpos,int width,int comp, int bitdepth, qmtx_t ** iwmatrix){ int16_t *rcoeff = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock2 = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); SAMPLE* left_data = (SAMPLE*)thor_alloc((2*MAX_TR_SIZE+2)*sizeof(SAMPLE),32)+1; SAMPLE* top_data = (SAMPLE*)thor_alloc((2*MAX_TR_SIZE+2)*sizeof(SAMPLE),32)+1; SAMPLE top_left; if (tb_split){ int size2 = size/2; int i,j,index; for (i=0;i<size;i+=size2){ for (j=0;j<size;j+=size2){ TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec,stride,&rec[i*stride+j],stride,i,j,ypos,xpos,size2,upright_available,downleft_available,1,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos+i,xpos+j,size2,&pblock[i*size+j],size,intra_mode,bitdepth); index = 2*(i/size2) + (j/size2); TEMPLATE(dequantize)(coeffq+index*size2*size2, rcoeff, qp, size2, iwmatrix ? iwmatrix[log2i(size2/4)] : NULL); inverse_transform (rcoeff, rblock2, size2, bitdepth); TEMPLATE(reconstruct_block)(rblock2,&pblock[i*size+j],&rec[i*stride+j],size2,size,stride,bitdepth); } } } else{ TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec,stride,NULL,0,0,0,ypos,xpos,size,upright_available,downleft_available,0,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos,xpos,size,pblock,size,intra_mode,bitdepth); TEMPLATE(dequantize)(coeffq, rcoeff, qp, size, iwmatrix ? 
iwmatrix[log2i(size/4)] : NULL); inverse_transform (rcoeff, rblock, size, bitdepth); TEMPLATE(reconstruct_block)(rblock,pblock,rec,size,size,stride,bitdepth); } thor_free(top_data - 1); thor_free(left_data - 1); thor_free(rcoeff); thor_free(rblock); thor_free(rblock2); } static void decode_and_reconstruct_block_intra_uv (SAMPLE *rec_u, SAMPLE *rec_v, int stride, int size, int qp, SAMPLE *pblock_u, SAMPLE *pblock_v, int16_t *coeffq_u, int16_t *coeffq_v, int tb_split, int upright_available,int downleft_available, intra_mode_t intra_mode,int ypos,int xpos,int width,int comp, int bitdepth, qmtx_t ** iwmatrix, SAMPLE *pblock_y, SAMPLE *rec_y, int rec_stride, int sub){ int16_t *rcoeff = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock2 = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); SAMPLE* left_data = (SAMPLE*)thor_alloc((2*MAX_TR_SIZE+2)*sizeof(SAMPLE),32)+1; SAMPLE* top_data = (SAMPLE*)thor_alloc((2*MAX_TR_SIZE+2)*sizeof(SAMPLE),32)+1; SAMPLE top_left; if (tb_split){ int size2 = size/2; int i,j,index; for (i=0;i<size;i+=size2){ for (j=0;j<size;j+=size2){ TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec_u,stride,&rec_u[i*stride+j],stride,i,j,ypos,xpos,size2,upright_available,downleft_available,1,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos+i,xpos+j,size2,&pblock_u[i*size+j],size,intra_mode,bitdepth); TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec_v,stride,&rec_v[i*stride+j],stride,i,j,ypos,xpos,size2,upright_available,downleft_available,1,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos+i,xpos+j,size2,&pblock_v[i*size+j],size,intra_mode,bitdepth); if (pblock_y) TEMPLATE(improve_uv_prediction)(&pblock_y[i*size+j], &pblock_u[i*size+j], &pblock_v[i*size+j], &rec_y[(i<<sub)*rec_stride+(j<<sub)], size2 << sub, size << sub, rec_stride, sub, bitdepth); index = 2*(i/size2) + (j/size2); TEMPLATE(dequantize)(coeffq_u+index*size2*size2, rcoeff, qp, size2, iwmatrix ? iwmatrix[log2i(size2/4)] : NULL); inverse_transform (rcoeff, rblock2, size2, bitdepth); TEMPLATE(reconstruct_block)(rblock2,&pblock_u[i*size+j],&rec_u[i*stride+j],size2,size,stride,bitdepth); TEMPLATE(dequantize)(coeffq_v+index*size2*size2, rcoeff, qp, size2, iwmatrix ? iwmatrix[log2i(size2/4)] : NULL); inverse_transform (rcoeff, rblock2, size2, bitdepth); TEMPLATE(reconstruct_block)(rblock2,&pblock_v[i*size+j],&rec_v[i*stride+j],size2,size,stride,bitdepth); } } } else{ TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec_u,stride,NULL,0,0,0,ypos,xpos,size,upright_available,downleft_available,0,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos,xpos,size,pblock_u,size,intra_mode,bitdepth); TEMPLATE(make_top_and_left)(left_data,top_data,&top_left,rec_v,stride,NULL,0,0,0,ypos,xpos,size,upright_available,downleft_available,0,bitdepth); TEMPLATE(get_intra_prediction)(left_data,top_data,top_left,ypos,xpos,size,pblock_v,size,intra_mode,bitdepth); if (pblock_y) TEMPLATE(improve_uv_prediction)(pblock_y, pblock_u, pblock_v, rec_y, size << sub, size << sub, rec_stride, sub, bitdepth); TEMPLATE(dequantize)(coeffq_u, rcoeff, qp, size, iwmatrix ? iwmatrix[log2i(size/4)] : NULL); inverse_transform (rcoeff, rblock, size, bitdepth); TEMPLATE(reconstruct_block)(rblock,pblock_u,rec_u,size,size,stride,bitdepth); TEMPLATE(dequantize)(coeffq_v, rcoeff, qp, size, iwmatrix ? 
iwmatrix[log2i(size/4)] : NULL); inverse_transform (rcoeff, rblock, size, bitdepth); TEMPLATE(reconstruct_block)(rblock,pblock_v,rec_v,size,size,stride,bitdepth); } thor_free(top_data - 1); thor_free(left_data - 1); thor_free(rcoeff); thor_free(rblock); thor_free(rblock2); } static void decode_and_reconstruct_block_inter (SAMPLE *rec, int stride, int size, int qp, SAMPLE *pblock, int16_t *coeffq,int tb_split, int bitdepth, qmtx_t ** iwmatrix){ int16_t *rcoeff = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *rblock2 = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); if (tb_split){ int size2 = size/2; int i,j,k,index; for (i=0;i<size;i+=size2){ for (j=0;j<size;j+=size2){ index = 2*(i/size2) + (j/size2); TEMPLATE(dequantize)(coeffq+index*size2*size2, rcoeff, qp, size2, iwmatrix ? iwmatrix[log2i(size2/4)] : NULL); inverse_transform (rcoeff, rblock2, size2, bitdepth); /* Copy from compact block of quarter size to full size */ for (k=0;k<size2;k++){ memcpy(rblock+(i+k)*size+j,rblock2+k*size2,size2*sizeof(int16_t)); } } } } else { TEMPLATE(dequantize)(coeffq, rcoeff, qp, size, iwmatrix ? iwmatrix[log2i(size/4)] : NULL); inverse_transform (rcoeff, rblock, size, bitdepth); } TEMPLATE(reconstruct_block)(rblock,pblock,rec,size,size,stride,bitdepth); thor_free(rcoeff); thor_free(rblock); thor_free(rblock2); } static void copy_deblock_data(decoder_info_t *decoder_info, block_info_dec_t *block_info){ int size = block_info->block_pos.size; int block_posy = block_info->block_pos.ypos/MIN_PB_SIZE; int block_posx = block_info->block_pos.xpos/MIN_PB_SIZE; int block_stride = decoder_info->width/MIN_PB_SIZE; int block_index; int m,n,m0,n0,index; int div = size/(2*MIN_PB_SIZE); int bwidth = block_info->block_pos.bwidth; int bheight = block_info->block_pos.bheight; uint8_t tb_split = block_info->block_param.tb_split > 0; part_t pb_part = block_info->block_param.mode == MODE_INTER ? block_info->block_param.pb_part : PART_NONE; //TODO: Set pb_part properly for SKIP and BIPRED for (m=0;m<bheight/MIN_PB_SIZE;m++){ for (n=0;n<bwidth/MIN_PB_SIZE;n++){ block_index = (block_posy+m)*block_stride + block_posx+n; m0 = div > 0 ? m/div : 0; n0 = div > 0 ? 
n/div : 0; index = 2*m0+n0; if (index > 3) printf("error: index=%4d\n",index); decoder_info->deblock_data[block_index].cbp = block_info->cbp; decoder_info->deblock_data[block_index].tb_split = tb_split; decoder_info->deblock_data[block_index].pb_part = pb_part; decoder_info->deblock_data[block_index].size = block_info->block_pos.size; decoder_info->deblock_data[block_index].mode = block_info->block_param.mode; if (decoder_info->bit_count.stat_frame_type == B_FRAME && decoder_info->interp_ref == 2 && block_info->block_param.mode == MODE_SKIP && block_info->block_param.skip_idx==0) { int phase = decoder_info->frame_info.phase; decoder_info->deblock_data[block_index].inter_pred.mv0 = decoder_info->deblock_data[block_index].inter_pred_arr[phase].mv0; decoder_info->deblock_data[block_index].inter_pred.mv1 = decoder_info->deblock_data[block_index].inter_pred_arr[phase].mv0; if (decoder_info->num_reorder_pics == 2 && phase == 1) { decoder_info->deblock_data[block_index].inter_pred.mv1.x *= 2; decoder_info->deblock_data[block_index].inter_pred.mv1.y *= 2; } } else { decoder_info->deblock_data[block_index].inter_pred.mv0 = block_info->block_param.mv_arr0[index]; decoder_info->deblock_data[block_index].inter_pred.mv1 = block_info->block_param.mv_arr1[index]; } decoder_info->deblock_data[block_index].inter_pred.ref_idx0 = block_info->block_param.ref_idx0; decoder_info->deblock_data[block_index].inter_pred.ref_idx1 = block_info->block_param.ref_idx1; decoder_info->deblock_data[block_index].inter_pred.bipred_flag = block_info->block_param.dir; } } } static void decode_block(decoder_info_t *decoder_info,int size,int ypos,int xpos,int sub){ int width = decoder_info->width; int height = decoder_info->height; int xposY = xpos; int yposY = ypos; int xposC = xpos >> sub; int yposC = ypos >> sub; int sizeY = size; int sizeC = size >> sub; block_mode_t mode; intra_mode_t intra_mode; frame_type_t frame_type = decoder_info->frame_info.frame_type; int bipred = decoder_info->bipred; int qpY = decoder_info->frame_info.qpb; int qpC = sub ? 
chroma_qp[qpY] : qpY; /* Intermediate block variables */ SAMPLE *pblock_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); int16_t *coeff_y = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *coeff_u = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); int16_t *coeff_v = thor_alloc(2*MAX_TR_SIZE*MAX_TR_SIZE, 32); /* Block variables for bipred */ SAMPLE *pblock0_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock0_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock0_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock1_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); yuv_frame_t *rec = decoder_info->rec; yuv_frame_t *ref = decoder_info->ref[0]; /* Pointers to current position in reconstructed frame*/ SAMPLE *rec_y = &rec->y[yposY*rec->stride_y+xposY]; SAMPLE *rec_u = &rec->u[yposC*rec->stride_c+xposC]; SAMPLE *rec_v = &rec->v[yposC*rec->stride_c+xposC]; stream_t *stream = decoder_info->stream; /* Read data from bitstream */ block_info_dec_t block_info; block_info.block_pos.size = size; block_info.block_pos.ypos = ypos; block_info.block_pos.xpos = xpos; block_info.coeffq_y = coeff_y; block_info.coeffq_u = coeff_u; block_info.coeffq_v = coeff_v; block_info.sub = sub; /* Used for rectangular skip blocks */ int bwidth = min(size,width - xpos); int bheight = min(size,height - ypos); block_info.block_pos.bwidth = bwidth; block_info.block_pos.bheight = bheight; read_block(decoder_info,stream,&block_info,frame_type); mode = block_info.block_param.mode; if (mode == MODE_INTRA){ int ql = decoder_info->qmtx ? qp_to_qlevel(qpY,decoder_info->qmtx_offset) : 0; intra_mode = block_info.block_param.intra_mode; int bwidth = size; //TODO: fix for non-square blocks int bheight = size; //TODO: fix for non-square blocks int upright_available = get_upright_available(yposY, xposY, bwidth, bheight, width, height, 1 << decoder_info->log2_sb_size); int downleft_available = get_downleft_available(yposY, xposY, bwidth, bheight, width, height, 1 << decoder_info->log2_sb_size); //int upright_available = get_upright_available(ypos, xpos, size, width, 1 << decoder_info->log2_sb_size); //int downleft_available = get_downleft_available(ypos, xpos, size, height, 1 << decoder_info->log2_sb_size); int tb_split = block_info.block_param.tb_split; decode_and_reconstruct_block_intra(rec_y,rec->stride_y,sizeY,qpY,pblock_y,coeff_y,tb_split,upright_available,downleft_available,intra_mode,yposY,xposY,width,0,decoder_info->bitdepth,decoder_info->qmtx ? decoder_info->iwmatrix[ql][0][1] : NULL); if (decoder_info->subsample != 400) decode_and_reconstruct_block_intra_uv(rec_u,rec_v,rec->stride_c,sizeC,qpC,pblock_u,pblock_v,coeff_u,coeff_v,tb_split && sizeC > 4,upright_available,downleft_available,intra_mode,yposC,xposC,width>>sub,1,decoder_info->bitdepth,decoder_info->qmtx ? decoder_info->iwmatrix[ql][1][1] : NULL, decoder_info->cfl_intra ? 
pblock_y : 0, rec_y, rec->stride_y, sub); } else { int tb_split = block_info.block_param.tb_split; if (mode==MODE_SKIP){ if (block_info.block_param.dir==2){ SAMPLE *pblock0_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock0_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock0_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock1_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); int r0 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx0]; yuv_frame_t *ref0 = r0 >= 0 ? decoder_info->ref[r0] : decoder_info->interp_frames[0]; int sign0 = ref0->frame_num >= rec->frame_num; int r1 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx1]; yuv_frame_t *ref1 = r1 >= 0 ? decoder_info->ref[r1] : decoder_info->interp_frames[0]; int sign1 = ref1->frame_num >= rec->frame_num; if (decoder_info->bit_count.stat_frame_type == B_FRAME && decoder_info->interp_ref == 2 && block_info.block_param.skip_idx==0) { TEMPLATE(get_inter_prediction_temp)(width, height, ref0, ref1, &block_info.block_pos, decoder_info->deblock_data, decoder_info->num_reorder_pics + 1, decoder_info->frame_info.phase, pblock_y, pblock_u, pblock_v); } else { TEMPLATE(get_inter_prediction_yuv)(ref0, pblock0_y, pblock0_u, pblock0_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign0, width, height, bipred, 0, decoder_info->bitdepth); TEMPLATE(get_inter_prediction_yuv)(ref1, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, block_info.block_param.mv_arr1, sign1, width, height, bipred, 0, decoder_info->bitdepth); TEMPLATE(average_blocks_all)(pblock_y, pblock_u, pblock_v, pblock0_y, pblock0_u, pblock0_v, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, sub); } thor_free(pblock0_y); thor_free(pblock0_u); thor_free(pblock0_v); thor_free(pblock1_y); thor_free(pblock1_u); thor_free(pblock1_v); } else{ int ref_idx = block_info.block_param.ref_idx0; //TODO: Move to top int r = decoder_info->frame_info.ref_array[ref_idx]; ref = r>=0 ? 
decoder_info->ref[r] : decoder_info->interp_frames[0]; int sign = ref->frame_num > rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref, pblock_y, pblock_u, pblock_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign, width, height, bipred, 0, decoder_info->bitdepth); } int j; for (j = 0; j<bheight; j++) { memcpy(&rec_y[j*rec->stride_y], &pblock_y[j*sizeY], bwidth*sizeof(SAMPLE)); } for (j = 0; j<bheight >> sub; j++) { memcpy(&rec_u[j*rec->stride_c], &pblock_u[j*sizeC], (bwidth >> sub)*sizeof(SAMPLE)); memcpy(&rec_v[j*rec->stride_c], &pblock_v[j*sizeC], (bwidth >> sub)*sizeof(SAMPLE)); } copy_deblock_data(decoder_info, &block_info); return; } else if (mode==MODE_MERGE){ if (block_info.block_param.dir==2){ SAMPLE *pblock0_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock0_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock0_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock1_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); int r0 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx0]; yuv_frame_t *ref0 = r0 >= 0 ? decoder_info->ref[r0] : decoder_info->interp_frames[0]; int sign0 = ref0->frame_num >= rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref0, pblock0_y, pblock0_u, pblock0_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign0, width, height, bipred, 0, decoder_info->bitdepth); int r1 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx1]; yuv_frame_t *ref1 = r1 >= 0 ? decoder_info->ref[r1] : decoder_info->interp_frames[0]; int sign1 = ref1->frame_num >= rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref1, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, block_info.block_param.mv_arr1, sign1, width, height, bipred, 0, decoder_info->bitdepth); TEMPLATE(average_blocks_all)(pblock_y, pblock_u, pblock_v, pblock0_y, pblock0_u, pblock0_v, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, sub); thor_free(pblock0_y); thor_free(pblock0_u); thor_free(pblock0_v); thor_free(pblock1_y); thor_free(pblock1_u); thor_free(pblock1_v); } else{ int ref_idx = block_info.block_param.ref_idx0; //TODO: Move to top int r = decoder_info->frame_info.ref_array[ref_idx]; ref = r>=0 ? decoder_info->ref[r] : decoder_info->interp_frames[0]; int sign = ref->frame_num > rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref, pblock_y, pblock_u, pblock_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign, width, height, bipred, 0, decoder_info->bitdepth); } } else if (mode == MODE_INTER){ int ref_idx = block_info.block_param.ref_idx0; int r = decoder_info->frame_info.ref_array[ref_idx]; ref = r>=0 ? 
decoder_info->ref[r] : decoder_info->interp_frames[0]; int sign = ref->frame_num > rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref, pblock_y, pblock_u, pblock_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign, width, height, bipred, decoder_info->pb_split, decoder_info->bitdepth); } else if (mode == MODE_BIPRED){ SAMPLE *pblock0_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock0_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock0_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_y = thor_alloc(MAX_SB_SIZE*MAX_SB_SIZE*sizeof(SAMPLE), 32); SAMPLE *pblock1_u = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); SAMPLE *pblock1_v = thor_alloc((MAX_SB_SIZE*MAX_SB_SIZE >> 2*sub)*sizeof(SAMPLE), 32); int r0 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx0]; yuv_frame_t *ref0 = r0 >= 0 ? decoder_info->ref[r0] : decoder_info->interp_frames[0]; int sign0 = ref0->frame_num >= rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref0, pblock0_y, pblock0_u, pblock0_v, &block_info.block_pos, block_info.block_param.mv_arr0, sign0, width, height, bipred, decoder_info->pb_split, decoder_info->bitdepth); int r1 = decoder_info->frame_info.ref_array[block_info.block_param.ref_idx1]; yuv_frame_t *ref1 = r1 >= 0 ? decoder_info->ref[r1] : decoder_info->interp_frames[0]; int sign1 = ref1->frame_num >= rec->frame_num; TEMPLATE(get_inter_prediction_yuv)(ref1, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, block_info.block_param.mv_arr1, sign1, width, height, bipred, decoder_info->pb_split, decoder_info->bitdepth); TEMPLATE(average_blocks_all)(pblock_y, pblock_u, pblock_v, pblock0_y, pblock0_u, pblock0_v, pblock1_y, pblock1_u, pblock1_v, &block_info.block_pos, sub); thor_free(pblock0_y); thor_free(pblock0_u); thor_free(pblock0_v); thor_free(pblock1_y); thor_free(pblock1_u); thor_free(pblock1_v); } /* Dequantize, invere tranform and reconstruct */ int ql = decoder_info->qmtx ? qp_to_qlevel(qpY,decoder_info->qmtx_offset) : 0; decode_and_reconstruct_block_inter(rec_y,rec->stride_y,sizeY,qpY,pblock_y,coeff_y,tb_split,decoder_info->bitdepth,decoder_info->qmtx ? decoder_info->iwmatrix[ql][0][0] : NULL); // Use reconstructed luma to improve chroma prediction if (decoder_info->cfl_inter && decoder_info->subsample != 400) TEMPLATE(improve_uv_prediction)(pblock_y, pblock_u, pblock_v, rec_y, sizeY, sizeY, rec->stride_y, sub, decoder_info->bitdepth); decode_and_reconstruct_block_inter(rec_u,rec->stride_c,sizeC,qpC,pblock_u,coeff_u,tb_split&&sizeC>4,decoder_info->bitdepth,decoder_info->qmtx ? decoder_info->iwmatrix[ql][1][0] : NULL); decode_and_reconstruct_block_inter(rec_v,rec->stride_c,sizeC,qpC,pblock_v,coeff_v,tb_split&&sizeC>4,decoder_info->bitdepth,decoder_info->qmtx ? 
decoder_info->iwmatrix[ql][2][0] : NULL); } /* Copy deblock data to frame array */ copy_deblock_data(decoder_info,&block_info); thor_free(pblock0_y); thor_free(pblock0_u); thor_free(pblock0_v); thor_free(pblock1_y); thor_free(pblock1_u); thor_free(pblock1_v); thor_free(pblock_y); thor_free(pblock_u); thor_free(pblock_v); thor_free(coeff_y); thor_free(coeff_u); thor_free(coeff_v); } static int decode_super_mode(decoder_info_t *decoder_info, int size, int decode_this_size){ stream_t *stream = decoder_info->stream; block_context_t *block_context = decoder_info->block_context; frame_type_t frame_type = decoder_info->frame_info.frame_type; int split_flag = 0; int mode = MODE_SKIP; int stat_mode = STAT_SKIP; int num_ref=0,code,maxbit; int idx = log2i(size)-3; decoder_info->mode = MODE_SKIP; //Default initial value if (frame_type==I_FRAME){ decoder_info->mode = MODE_INTRA; if (size > MIN_BLOCK_SIZE && decode_this_size) split_flag = get_flc(1, stream); else split_flag = !decode_this_size; return split_flag; } if (!decode_this_size) { split_flag = !get_flc(1, stream); return split_flag; } if (size > MAX_TR_SIZE) { split_flag = !get_flc(1, stream); if (!split_flag) decoder_info->mode = MODE_SKIP; return split_flag; } num_ref = decoder_info->frame_info.num_ref; int bipred_possible_flag = num_ref > 1 && decoder_info->bipred; int split_possible_flag = size > MIN_BLOCK_SIZE; maxbit = 2 + num_ref + split_possible_flag + bipred_possible_flag; int interp_ref = decoder_info->frame_info.interp_ref; if (interp_ref > 2) { maxbit -= 1; //ref_idx = 0 is disallowed } code = get_vlc(10 + maxbit, stream); if (interp_ref) { if ((block_context->index == 2 || block_context->index>3) && size>MIN_BLOCK_SIZE){ /* Move skip down the list */ if (code<3) code = (code + 1) % 3; } if (split_possible_flag && code==1) { /* Set split flag and return */ split_flag = 1; decoder_info->bit_count.super_mode_stat[decoder_info->bit_count.stat_frame_type][idx][STAT_SPLIT] += 1; return split_flag; } if (!split_possible_flag && code > 0) { /* Didn't need a codeword for split so adjust for the empty slot */ code += 1; } if (!bipred_possible_flag && code >= 3) { /* Don't need a codeword for bipred so adjust for the empty slot */ code += 1; } if (code == 0) { mode = MODE_SKIP; stat_mode = STAT_SKIP; } else if (code == 2) { mode = MODE_MERGE; stat_mode = STAT_MERGE; } else if (code == 3) { mode = MODE_BIPRED; stat_mode = STAT_BIPRED; } else if (code == 4) { mode = MODE_INTRA; stat_mode = STAT_INTRA; } else if (code == 4 + num_ref) { mode = MODE_INTER; decoder_info->ref_idx = 0; stat_mode = STAT_REF_IDX0; } else{ mode = MODE_INTER; decoder_info->ref_idx = code - 4; stat_mode = STAT_REF_IDX1 + decoder_info->ref_idx-1; } decoder_info->mode = mode; } else { if ((block_context->index == 2 || block_context->index>3) && size>MIN_BLOCK_SIZE){ /* Skip is less likely than split, merge and inter-ref_idx=0 so move skip down the list */ if (code<4) code = (code + 1) % 4; } if (split_possible_flag && code==1) { /* Set split flag and return */ split_flag = 1; decoder_info->bit_count.super_mode_stat[decoder_info->bit_count.stat_frame_type][idx][STAT_SPLIT] += 1; return split_flag; } if (!split_possible_flag && code > 0) { /* Didn't need a codeword for split so adjust for the empty slot */ code += 1; } if (!bipred_possible_flag && code >= 4) { /* Don't need a codeword for bipred so adjust for the empty slot */ code += 1; } if (code == 0) { mode = MODE_SKIP; stat_mode = STAT_SKIP; } else if (code == 2) { mode = MODE_INTER; decoder_info->ref_idx = 0; stat_mode 
= STAT_REF_IDX0; } else if (code == 3) { mode = MODE_MERGE; stat_mode = STAT_MERGE; } else if (code == 4) { mode = MODE_BIPRED; stat_mode = STAT_BIPRED; } else if (code == 5) { mode = MODE_INTRA; stat_mode = STAT_INTRA; } else{ mode = MODE_INTER; decoder_info->ref_idx = code - 5; stat_mode = STAT_REF_IDX1 + decoder_info->ref_idx - 1; } decoder_info->mode = mode; } decoder_info->bit_count.super_mode_stat[decoder_info->bit_count.stat_frame_type][idx][stat_mode] += 1; return split_flag; } void TEMPLATE(process_block_dec)(decoder_info_t *decoder_info,int size,int yposY,int xposY,int sub) { int width = decoder_info->width; int height = decoder_info->height; stream_t *stream = decoder_info->stream; frame_type_t frame_type = decoder_info->frame_info.frame_type; int split_flag = 0; if (yposY >= height || xposY >= width) return; int decode_this_size = (yposY + size <= height) && (xposY + size <= width); int decode_rectangular_size = !decode_this_size && frame_type != I_FRAME; int bit_start = stream->bitcnt; int mode = MODE_SKIP; block_context_t block_context; TEMPLATE(find_block_contexts)(yposY, xposY, height, width, size, decoder_info->deblock_data, &block_context, decoder_info->use_block_contexts); decoder_info->block_context = &block_context; split_flag = decode_super_mode(decoder_info,size,decode_this_size); mode = decoder_info->mode; /* Read delta_qp and set block-level qp */ if (size == (1<<decoder_info->log2_sb_size) && (split_flag || mode != MODE_SKIP) && decoder_info->max_delta_qp > 0) { /* Read delta_qp */ int delta_qp = read_delta_qp(stream); int prev_qp; if (yposY == 0 && xposY == 0) prev_qp = decoder_info->frame_info.qp; else prev_qp = decoder_info->frame_info.qpb; decoder_info->frame_info.qpb = prev_qp + delta_qp; } decoder_info->bit_count.super_mode[decoder_info->bit_count.stat_frame_type] += (stream->bitcnt - bit_start); if (split_flag && size >= MIN_BLOCK_SIZE){ int new_size = size/2; TEMPLATE(process_block_dec)(decoder_info,new_size,yposY+0*new_size,xposY+0*new_size,sub); TEMPLATE(process_block_dec)(decoder_info,new_size,yposY+1*new_size,xposY+0*new_size,sub); TEMPLATE(process_block_dec)(decoder_info,new_size,yposY+0*new_size,xposY+1*new_size,sub); TEMPLATE(process_block_dec)(decoder_info,new_size,yposY+1*new_size,xposY+1*new_size,sub); } else if (decode_this_size || decode_rectangular_size){ decode_block(decoder_info,size,yposY,xposY,sub); } }
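/*
 * Annotation (not part of the original dataset record): decode_block() above
 * clips every frame-boundary copy with bwidth = min(size, width - xpos) and
 * bheight = min(size, height - ypos), so a superblock overhanging the right
 * or bottom frame edge only touches pixels that exist. Below is a minimal
 * standalone sketch of that clamped row-wise copy; the names
 * copy_block_clamped and imin and the uint8_t SAMPLE are illustrative
 * assumptions, not taken from the decoder (the codec's SAMPLE may be 16-bit
 * at high bit depths).
 */
#include <stdint.h>
#include <string.h>

typedef uint8_t SAMPLE;

static int imin(int a, int b) { return a < b ? a : b; }

/* Copy a size x size prediction block into the frame, clipped at the edges. */
static void copy_block_clamped(SAMPLE *dst, int dst_stride,
                               const SAMPLE *src, int src_stride,
                               int size, int xpos, int ypos,
                               int width, int height)
{
    int bwidth  = imin(size, width  - xpos);   /* columns that fit */
    int bheight = imin(size, height - ypos);   /* rows that fit */
    int j;
    for (j = 0; j < bheight; j++)
        memcpy(&dst[j * dst_stride], &src[j * src_stride],
               (size_t)bwidth * sizeof(SAMPLE));
}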
./CrossVul/dataset_final_sorted/CWE-119/c/good_1_0
crossvul-cpp_data_bad_143_0
/* radare - LGPL - Copyright 2017 - pancake, cgvwzq */ // http://webassembly.org/docs/binary-encoding/#module-structure #include <r_asm.h> #include <r_lib.h> #include <string.h> #include "../../asm/arch/wasm/wasm.h" #include "../../bin/format/wasm/wasm.h" static WasmOpDef opcodes[256] = { [WASM_OP_TRAP] = { "trap", 1, 1 }, [WASM_OP_NOP] = { "nop", 1, 1 }, [WASM_OP_BLOCK] = { "block", 2, 2 }, [WASM_OP_LOOP] = { "loop", 2, 2 }, [WASM_OP_IF] = { "if", 2, 2 }, [WASM_OP_ELSE] = { "else", 1, 1 }, [WASM_OP_END] = { "end", 1, 1 }, [WASM_OP_BR] = { "br", 2, 2 }, [WASM_OP_BRIF] = { "br_if", 2, 2 }, [WASM_OP_BRTABLE] = { "brtable", 3, 0 }, [WASM_OP_RETURN] = { "return", 1, 1 }, [WASM_OP_CALL] = { "call" , 2, 2 }, [WASM_OP_CALLINDIRECT] = { "call_indirect", 3, 3 }, [WASM_OP_DROP] = { "drop", 1, 1 }, [WASM_OP_SELECT] = { "select", 1, 1 }, [WASM_OP_GETLOCAL] = { "get_local", 2, 2 }, [WASM_OP_SETLOCAL] = { "set_local", 2, 2 }, [WASM_OP_TEELOCAL] = { "tee_local", 2, 2 }, [WASM_OP_GETGLOBAL] = { "get_global", 2, 2 }, [WASM_OP_SETGLOBAL] = { "set_global", 2, 2 }, [WASM_OP_I32LOAD] = { "i32.load", 3, 3 }, [WASM_OP_I64LOAD] = { "i64.load", 3, 3 }, [WASM_OP_F32LOAD] = { "f32.load", 3, 3 }, [WASM_OP_F64LOAD] = { "f64.load", 3, 3 }, [WASM_OP_I32LOAD8S] = { "i32.load8_s", 3, 3 }, [WASM_OP_I32LOAD8U] = { "i32.load8_u", 3, 3 }, [WASM_OP_I32LOAD16S] = { "i32.load16_s", 3, 3 }, [WASM_OP_I32LOAD16U] = { "i32.load_16_u", 3, 3 }, [WASM_OP_I64LOAD8S] = { "i64.load8_s", 3, 3 }, [WASM_OP_I64LOAD8U] = { "i64.load8_u", 3, 3 }, [WASM_OP_I64LOAD16S] = { "i64.load16_s", 3, 3 }, [WASM_OP_I64LOAD16U] = { "i64.load16_u", 3, 3 }, [WASM_OP_I64LOAD32S] = { "i64.load32_s", 3, 3 }, [WASM_OP_I64LOAD32U] = { "i64.load32_u", 3, 3 }, [WASM_OP_I32STORE] = { "i32.store", 3, 3 }, [WASM_OP_I64STORE] = { "i64.store", 3, 3 }, [WASM_OP_F32STORE] = { "f32.store", 3, 3 }, [WASM_OP_F64STORE] = { "f64.store", 3, 3 }, [WASM_OP_I32STORE8] = { "i32.store8", 3, 3 }, [WASM_OP_I32STORE16] = { "i32.store16", 3, 3 }, [WASM_OP_I64STORE8] = { "i64.store8", 3, 3 }, [WASM_OP_I64STORE16] = { "i64.store16", 3, 3 }, [WASM_OP_I64STORE32] = { "i64.store32", 3, 3 }, [WASM_OP_CURRENTMEMORY] = { "current_memory", 2, 2 }, [WASM_OP_GROWMEMORY] = { "grow_memory", 2, 2 }, [WASM_OP_I32CONST] = { "i32.const", 2, 2 }, [WASM_OP_I64CONST] = { "i64.const", 2, 2 }, [WASM_OP_F32CONST] = { "f32.const", 2, 2 }, [WASM_OP_F64CONST] = { "f64.const", 2, 2 }, [WASM_OP_I32EQZ] = { "i32.eqz", 1, 1 }, [WASM_OP_I32EQ] = { "i32.eq", 1, 1 }, [WASM_OP_I32NE] = { "i32.ne", 1, 1}, [WASM_OP_I32LTS] = { "i32.lt_s", 1, 1 }, [WASM_OP_I32LTU] = { "i32.lt_u", 1, 1 }, [WASM_OP_I32GTS] = { "i32.gt_s", 1, 1 }, [WASM_OP_I32GTU] = { "i32.gt_u", 1, 1 }, [WASM_OP_I32LES] = { "i32.le_s", 1, 1 }, [WASM_OP_I32LEU] = { "i32.le_u", 1, 1 }, [WASM_OP_I32GES] = { "i32.ge_s", 1, 1 }, [WASM_OP_I32GEU] = { "i32.ge_u", 1, 1 }, [WASM_OP_I64EQZ] = { "i64.eqz", 1, 1 }, [WASM_OP_I64EQ] = {" i64.eq", 1, 1 }, [WASM_OP_I64NE] = {" i64.ne", 1, 1 }, [WASM_OP_I64LTS] = { "i64.lt_s", 1, 1 }, [WASM_OP_I64LTU] = { "i64.lt_u", 1, 1 }, [WASM_OP_I64GTS] = { "i64.gt_s", 1, 1 }, [WASM_OP_I64GTU] = { "i64.gt_u", 1, 1 }, [WASM_OP_I64LES] = { "i64.le_s", 1, 1 }, [WASM_OP_I64LEU] = { "i64.le_u", 1, 1 }, [WASM_OP_I64GES] = { "i64.ge_s", 1, 1 }, [WASM_OP_I64GEU] = { "i64.ge_u", 1, 1 }, [WASM_OP_F32EQ] = { "f32.eq", 1, 1 }, [WASM_OP_F32NE] = { "f32.ne", 1, 1 }, [WASM_OP_F32LT] = { "f32.lt", 1, 1 }, [WASM_OP_F32GT] = { "f32.gt", 1, 1 }, [WASM_OP_F32LE] = { "f32.le", 1, 1 }, [WASM_OP_F32GE] = { "f32.ge", 1, 1 }, [WASM_OP_F64EQ] = { "f64.eq", 1, 1 
}, [WASM_OP_F64NE] = { "f64.ne", 1, 1 }, [WASM_OP_F64LT] = { "f64.lt", 1, 1 }, [WASM_OP_F64GT] = { "f64.gt", 1, 1 }, [WASM_OP_F64LE] = { "f64.le", 1, 1 }, [WASM_OP_F64GE] = { "f64.ge", 1, 1 }, [WASM_OP_I32CLZ] = { "i32.clz", 1, 1 }, [WASM_OP_I32CTZ] = { "i32.ctz", 1, 1 }, [WASM_OP_I32POPCNT] = { "i32.popcnt", 1, 1 }, [WASM_OP_I32ADD] = { "i32.add", 1, 1 }, [WASM_OP_I32SUB] = { "i32.sub", 1, 1 }, [WASM_OP_I32MUL] = { "i32.mul", 1, 1 }, [WASM_OP_I32DIVS] = { "i32.div_s", 1, 1 }, [WASM_OP_I32DIVU] = { "i32.div_u", 1, 1 }, [WASM_OP_I32REMS] = { "i32.rem_s", 1, 1 }, [WASM_OP_I32REMU] = { "i32.rem_u", 1, 1 }, [WASM_OP_I32AND] = { "i32.and", 1, 1 }, [WASM_OP_I32OR] = { "i32.or", 1, 1 }, [WASM_OP_I32XOR] = { "i32.xor", 1, 1 }, [WASM_OP_I32SHL] = { "i32.shl", 1, 1 }, [WASM_OP_I32SHRS] = { "i32.shr_s", 1, 1 }, [WASM_OP_I32SHRU] = { "i32.shr_u", 1, 1 }, [WASM_OP_I32ROTL] = { "i32.rotl", 1, 1 }, [WASM_OP_I32ROTR] = { "i32.rotr", 1, 1 }, [WASM_OP_I64CLZ] = { "i64.clz", 1, 1 }, [WASM_OP_I64CTZ] = { "i64.ctz", 1, 1 }, [WASM_OP_I64POPCNT] = { "i64.popcnt", 1, 1 }, [WASM_OP_I64ADD] = { "i64.add", 1, 1 }, [WASM_OP_I64SUB] = { "i64.sub", 1, 1 }, [WASM_OP_I64MUL] = { "i64.mul", 1, 1 }, [WASM_OP_I64DIVS] = { "i64.div_s", 1, 1 }, [WASM_OP_I64DIVU] = { "i64.div_u", 1, 1 }, [WASM_OP_I64REMS] = { "i64.rem_s", 1, 1 }, [WASM_OP_I64REMU] = { "i64.rem_u", 1, 1 }, [WASM_OP_I64AND] = { "i64.and", 1, 1 }, [WASM_OP_I64OR] = { "i64.or", 1, 1 }, [WASM_OP_I64XOR] = { "i64.xor", 1, 1 }, [WASM_OP_I64SHL] = { "i64.shl", 1, 1 }, [WASM_OP_I64SHRS] = { "i64.shr_s", 1, 1 }, [WASM_OP_I64SHRU] = { "i64.shr_u", 1, 1 }, [WASM_OP_I64ROTL] = { "i64.rotl", 1, 1 }, [WASM_OP_I64ROTR] = { "i64.rotr", 1, 1 }, [WASM_OP_F32ABS] = { "f32.abs", 1, 1 }, [WASM_OP_F32NEG] = { "f32.neg", 1, 1 }, [WASM_OP_F32CEIL] = { "f32.ceil", 1, 1 }, [WASM_OP_F32FLOOR] = { "f32.floor", 1, 1 }, [WASM_OP_F32TRUNC] = { "f32.trunc", 1, 1 }, [WASM_OP_F32NEAREST] = { "f32.nearest", 1, 1 }, [WASM_OP_F32SQRT] = { "f32.sqrt", 1, 1 }, [WASM_OP_F32ADD] = { "f32.add", 1, 1 }, [WASM_OP_F32SUB] = { "f32.sub", 1, 1 }, [WASM_OP_F32MUL] = { "f32.mul", 1, 1 }, [WASM_OP_F32DIV] = { "f32.div", 1, 1 }, [WASM_OP_F32MIN] = { "f32.min", 1, 1 }, [WASM_OP_F32MAX] = { "f32.max", 1, 1 }, [WASM_OP_F32COPYSIGN] = {" f32.copysign", 1, 1 }, [WASM_OP_F64ABS] = { "f64.abs", 1, 1 }, [WASM_OP_F64NEG] = { "f64.neg", 1, 1 }, [WASM_OP_F64CEIL] = { "f64.ceil", 1, 1 }, [WASM_OP_F64FLOOR] = { "f64.floor", 1, 1 }, [WASM_OP_F64TRUNC] = { "f64.trunc", 1, 1 }, [WASM_OP_F64NEAREST] = { "f64.nearest", 1, 1 }, [WASM_OP_F64SQRT] = { "f64.sqrt", 1, 1 }, [WASM_OP_F64ADD] = { "f64.add", 1, 1 }, [WASM_OP_F64SUB] = { "f64.sub", 1, 1 }, [WASM_OP_F64MUL] = { "f64.mul", 1, 1 }, [WASM_OP_F64DIV] = { "f64.div", 1, 1 }, [WASM_OP_F64MIN] = { "f64.min", 1, 1 }, [WASM_OP_F64MAX] = { "f64.max", 1, 1 }, [WASM_OP_F64COPYSIGN] = { "f64.copysign", 1, 1 }, [WASM_OP_I32WRAPI64] = { "i32.wrap/i64", 1, 1 }, [WASM_OP_I32TRUNCSF32] = { "i32.trunc_s/f32", 1, 1 }, [WASM_OP_I32TRUNCUF32] = { "i32.trunc_u/f32", 1, 1 }, [WASM_OP_I32TRUNCSF64] = { "i32.trunc_s/f64", 1, 1 }, [WASM_OP_I32TRUNCUF64] = { "i32.trunc_u/f64", 1, 1 }, [WASM_OP_I64EXTENDSI32] = { "i64.extend_s/i32", 1, 1 }, [WASM_OP_I64EXTENDUI32] = { "i64.extend_u/i32", 1, 1 }, [WASM_OP_I64TRUNCSF32] = { "i64.trunc_s/f32", 1, 1 }, [WASM_OP_I64TRUNCUF32] = { "i64.trunc_u/f32", 1, 1 }, [WASM_OP_I64TRUNCSF64] = { "i64.trunc_s/f64", 1, 1 }, [WASM_OP_I64TRUNCUF64] = { "i64.trunc_u/f64", 1, 1 }, [WASM_OP_F32CONVERTSI32] = { "f32.convert_s/i32", 1, 1 }, [WASM_OP_F32CONVERTUI32] = { 
"f32.convert_u/i32", 1, 1 }, [WASM_OP_F32CONVERTSI64] = { "f32.convert_s/i64", 1, 1 }, [WASM_OP_F32CONVERTUI64] = { "f32.convert_u/i64", 1, 1 }, [WASM_OP_F32DEMOTEF64] = { "f32.demote/f64", 1, 1 }, [WASM_OP_F64CONVERTSI32] = { "f64.convert_s/i32", 1, 1 }, [WASM_OP_F64CONVERTUI32] = { "f64.convert_u/i32", 1, 1 }, [WASM_OP_F64CONVERTSI64] = { "f64.convert_s/i64", 1, 1 }, [WASM_OP_F64CONVERTUI64] = { "f64.convert_u/i64", 1, 1 }, [WASM_OP_F64PROMOTEF32] = { "f64.promote/f32", 1, 1 }, [WASM_OP_I32REINTERPRETF32] = { "i32.reinterpret/f32", 1, 1 }, [WASM_OP_I64REINTERPRETF64] = { "i64.reinterpret/f64", 1, 1 }, [WASM_OP_F32REINTERPRETI32] = { "f32.reinterpret/i32", 1, 1 }, [WASM_OP_F64REINTERPRETI64] = { "f64/reinterpret/i64", 1, 1 } }; int wasm_asm(const char *str, unsigned char *buf, int buf_len) { // TODO: add immediates assembly int i = 0, len = -1; char tmp[R_ASM_BUFSIZE]; while (str[i] != ' ' && i < buf_len) { tmp[i] = str[i]; i++; } tmp[i] = 0; for (i = 0; i < 0xff; i++) { WasmOpDef *opdef = &opcodes[i]; if (opdef->txt) { if (!strcmp (opdef->txt, tmp)) { buf[0] = i; return 1; } } } return len; } int wasm_dis(WasmOp *op, const unsigned char *buf, int buf_len) { op->len = 1; op->op = buf[0]; if (op->op > 0xbf) { return 1; } // add support for extension opcodes (SIMD + atomics) WasmOpDef *opdef = &opcodes[op->op]; switch (op->op) { case WASM_OP_TRAP: case WASM_OP_NOP: case WASM_OP_ELSE: case WASM_OP_RETURN: case WASM_OP_DROP: case WASM_OP_SELECT: case WASM_OP_I32EQZ: case WASM_OP_I32EQ: case WASM_OP_I32NE: case WASM_OP_I32LTS: case WASM_OP_I32LTU: case WASM_OP_I32GTS: case WASM_OP_I32GTU: case WASM_OP_I32LES: case WASM_OP_I32LEU: case WASM_OP_I32GES: case WASM_OP_I32GEU: case WASM_OP_I64EQZ: case WASM_OP_I64EQ: case WASM_OP_I64NE: case WASM_OP_I64LTS: case WASM_OP_I64LTU: case WASM_OP_I64GTS: case WASM_OP_I64GTU: case WASM_OP_I64LES: case WASM_OP_I64LEU: case WASM_OP_I64GES: case WASM_OP_I64GEU: case WASM_OP_F32EQ: case WASM_OP_F32NE: case WASM_OP_F32LT: case WASM_OP_F32GT: case WASM_OP_F32LE: case WASM_OP_F32GE: case WASM_OP_F64EQ: case WASM_OP_F64NE: case WASM_OP_F64LT: case WASM_OP_F64GT: case WASM_OP_F64LE: case WASM_OP_F64GE: case WASM_OP_I32CLZ: case WASM_OP_I32CTZ: case WASM_OP_I32POPCNT: case WASM_OP_I32ADD: case WASM_OP_I32SUB: case WASM_OP_I32MUL: case WASM_OP_I32DIVS: case WASM_OP_I32DIVU: case WASM_OP_I32REMS: case WASM_OP_I32REMU: case WASM_OP_I32AND: case WASM_OP_I32OR: case WASM_OP_I32XOR: case WASM_OP_I32SHL: case WASM_OP_I32SHRS: case WASM_OP_I32SHRU: case WASM_OP_I32ROTL: case WASM_OP_I32ROTR: case WASM_OP_I64CLZ: case WASM_OP_I64CTZ: case WASM_OP_I64POPCNT: case WASM_OP_I64ADD: case WASM_OP_I64SUB: case WASM_OP_I64MUL: case WASM_OP_I64DIVS: case WASM_OP_I64DIVU: case WASM_OP_I64REMS: case WASM_OP_I64REMU: case WASM_OP_I64AND: case WASM_OP_I64OR: case WASM_OP_I64XOR: case WASM_OP_I64SHL: case WASM_OP_I64SHRS: case WASM_OP_I64SHRU: case WASM_OP_I64ROTL: case WASM_OP_I64ROTR: case WASM_OP_F32ABS: case WASM_OP_F32NEG: case WASM_OP_F32CEIL: case WASM_OP_F32FLOOR: case WASM_OP_F32TRUNC: case WASM_OP_F32NEAREST: case WASM_OP_F32SQRT: case WASM_OP_F32ADD: case WASM_OP_F32SUB: case WASM_OP_F32MUL: case WASM_OP_F32DIV: case WASM_OP_F32MIN: case WASM_OP_F32MAX: case WASM_OP_F32COPYSIGN: case WASM_OP_F64ABS: case WASM_OP_F64NEG: case WASM_OP_F64CEIL: case WASM_OP_F64FLOOR: case WASM_OP_F64TRUNC: case WASM_OP_F64NEAREST: case WASM_OP_F64SQRT: case WASM_OP_F64ADD: case WASM_OP_F64SUB: case WASM_OP_F64MUL: case WASM_OP_F64DIV: case WASM_OP_F64MIN: case WASM_OP_F64MAX: case 
WASM_OP_F64COPYSIGN: case WASM_OP_I32WRAPI64: case WASM_OP_I32TRUNCSF32: case WASM_OP_I32TRUNCUF32: case WASM_OP_I32TRUNCSF64: case WASM_OP_I32TRUNCUF64: case WASM_OP_I64EXTENDSI32: case WASM_OP_I64EXTENDUI32: case WASM_OP_I64TRUNCSF32: case WASM_OP_I64TRUNCUF32: case WASM_OP_I64TRUNCSF64: case WASM_OP_I64TRUNCUF64: case WASM_OP_F32CONVERTSI32: case WASM_OP_F32CONVERTUI32: case WASM_OP_F32CONVERTSI64: case WASM_OP_F32CONVERTUI64: case WASM_OP_F32DEMOTEF64: case WASM_OP_F64CONVERTSI32: case WASM_OP_F64CONVERTUI32: case WASM_OP_F64CONVERTSI64: case WASM_OP_F64CONVERTUI64: case WASM_OP_F64PROMOTEF32: case WASM_OP_I32REINTERPRETF32: case WASM_OP_I64REINTERPRETF64: case WASM_OP_F32REINTERPRETI32: case WASM_OP_F64REINTERPRETI64: case WASM_OP_END: { snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt); } break; case WASM_OP_BLOCK: case WASM_OP_LOOP: case WASM_OP_IF: { st32 val = 0; size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; switch (0x80 - val) { case R_BIN_WASM_VALUETYPE_EMPTY: snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt); break; case R_BIN_WASM_VALUETYPE_i32: snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i32)", opdef->txt); break; case R_BIN_WASM_VALUETYPE_i64: snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i64)", opdef->txt); break; case R_BIN_WASM_VALUETYPE_f32: snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f32)", opdef->txt); break; case R_BIN_WASM_VALUETYPE_f64: snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f64)", opdef->txt); break; default: snprintf (op->txt, R_ASM_BUFSIZE, "%s (result ?)", opdef->txt); break; } op->len += n; } break; case WASM_OP_BR: case WASM_OP_BRIF: case WASM_OP_CALL: { ut32 val = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val); op->len += n; } break; case WASM_OP_BRTABLE: { ut32 count = 0, *table = NULL, def = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &count); if (!(n > 0 && n < buf_len)) { goto err; } if (!(table = calloc (count, sizeof (ut32)))) { goto err; } int i = 0; op->len += n; for (i = 0; i < count; i++) { n = read_u32_leb128 (buf + op->len, buf + buf_len, &table[i]); if (!(op->len + n <= buf_len)) { goto beach; } op->len += n; } n = read_u32_leb128 (buf + op->len, buf + buf_len, &def); if (!(n > 0 && n + op->len < buf_len)) { goto beach; } op->len += n; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d ", opdef->txt, count); for (i = 0; i < count && strlen (op->txt) + 10 < R_ASM_BUFSIZE; i++) { int optxtlen = strlen (op->txt); snprintf (op->txt + optxtlen, R_ASM_BUFSIZE - optxtlen, "%d ", table[i]); } snprintf (op->txt + strlen (op->txt), R_ASM_BUFSIZE, "%d", def); free (table); break; beach: free (table); goto err; } break; case WASM_OP_CALLINDIRECT: { ut32 val = 0, reserved = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; op->len += n; n = read_u32_leb128 (buf + op->len, buf + buf_len, &reserved); if (!(n == 1 && op->len + n <= buf_len)) goto err; reserved &= 0x1; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, val, reserved); op->len += n; } break; case WASM_OP_GETLOCAL: case WASM_OP_SETLOCAL: case WASM_OP_TEELOCAL: case WASM_OP_GETGLOBAL: case WASM_OP_SETGLOBAL: { ut32 val = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val); op->len += n; } break; case WASM_OP_I32LOAD: case WASM_OP_I64LOAD: case 
WASM_OP_F32LOAD: case WASM_OP_F64LOAD: case WASM_OP_I32LOAD8S: case WASM_OP_I32LOAD8U: case WASM_OP_I32LOAD16S: case WASM_OP_I32LOAD16U: case WASM_OP_I64LOAD8S: case WASM_OP_I64LOAD8U: case WASM_OP_I64LOAD16S: case WASM_OP_I64LOAD16U: case WASM_OP_I64LOAD32S: case WASM_OP_I64LOAD32U: case WASM_OP_I32STORE: case WASM_OP_I64STORE: case WASM_OP_F32STORE: case WASM_OP_F64STORE: case WASM_OP_I32STORE8: case WASM_OP_I32STORE16: case WASM_OP_I64STORE8: case WASM_OP_I64STORE16: case WASM_OP_I64STORE32: { ut32 flag = 0, offset = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &flag); if (!(n > 0 && n < buf_len)) goto err; op->len += n; n = read_u32_leb128 (buf + op->len, buf + buf_len, &offset); if (!(n > 0 && op->len + n <= buf_len)) goto err; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, flag, offset); op->len += n; } break; case WASM_OP_CURRENTMEMORY: case WASM_OP_GROWMEMORY: { ut32 reserved = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &reserved); if (!(n == 1 && n < buf_len)) goto err; reserved &= 0x1; snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, reserved); op->len += n; } break; case WASM_OP_I32CONST: { st32 val = 0; size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT32d, opdef->txt, val); op->len += n; } break; case WASM_OP_I64CONST: { st64 val = 0; size_t n = read_i64_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT64d, opdef->txt, val); op->len += n; } break; case WASM_OP_F32CONST: { ut32 val = 0; size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; long double d = (long double)val; snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d); op->len += n; } break; case WASM_OP_F64CONST: { ut64 val = 0; size_t n = read_u64_leb128 (buf + 1, buf + buf_len, &val); if (!(n > 0 && n < buf_len)) goto err; long double d = (long double)val; snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d); op->len += n; } break; default: goto err; } return op->len; err: op->len = 1; snprintf (op->txt, R_ASM_BUFSIZE, "invalid"); return op->len; }
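/*
 * Annotation (not part of the original dataset record): wasm_dis() above
 * depends on read_u32_leb128(buf, buf_end, &val) returning the number of
 * bytes consumed, and on the caller rejecting an operand whenever the check
 * (n > 0 && n < buf_len) fails. Below is a minimal sketch of a
 * bounds-checked unsigned LEB128 reader under that same contract (returns 0
 * on truncated or overlong input); the name leb128_read_u32 is an
 * illustrative assumption, not radare2's implementation.
 */
#include <stddef.h>
#include <stdint.h>

static size_t leb128_read_u32(const uint8_t *p, const uint8_t *end, uint32_t *out)
{
    uint32_t result = 0;
    unsigned shift = 0;
    const uint8_t *start = p;
    while (p < end && shift <= 28) {      /* a 32-bit value needs at most 5 bytes */
        uint8_t byte = *p++;
        result |= (uint32_t)(byte & 0x7f) << shift;
        if (!(byte & 0x80)) {             /* high bit clear: final byte */
            *out = result;
            return (size_t)(p - start);   /* bytes consumed */
        }
        shift += 7;
    }
    return 0;                             /* ran past end, or more than 5 bytes */
}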
./CrossVul/dataset_final_sorted/CWE-119/c/bad_143_0
crossvul-cpp_data_bad_1030_1
/* ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding ** Copyright (C) 2003-2005 M. Bakker, Nero AG, http://www.nero.com ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ** ** Any non-GPL usage of this software or parts of this software is strictly ** forbidden. ** ** The "appropriate copyright message" mentioned in section 2c of the GPLv2 ** must read: "Code from FAAD2 is copyright (c) Nero AG, www.nero.com" ** ** Commercial non-GPL licensing of this software is possible. ** For more info contact Nero AG through Mpeg4AAClicense@nero.com. ** ** $Id: syntax.c,v 1.93 2009/01/26 23:51:15 menno Exp $ **/ /* Reads the AAC bitstream as defined in 14496-3 (MPEG-4 Audio) */ #include "common.h" #include "structs.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include "syntax.h" #include "specrec.h" #include "huffman.h" #include "bits.h" #include "pulse.h" #include "analysis.h" #include "drc.h" #ifdef ERROR_RESILIENCE #include "rvlc.h" #endif #ifdef SBR_DEC #include "sbr_syntax.h" #endif #include "mp4.h" /* static function declarations */ static void decode_sce_lfe(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, uint8_t id_syn_ele); static void decode_cpe(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, uint8_t id_syn_ele); static uint8_t single_lfe_channel_element(NeAACDecStruct *hDecoder, bitfile *ld, uint8_t channel, uint8_t *tag); static uint8_t channel_pair_element(NeAACDecStruct *hDecoder, bitfile *ld, uint8_t channel, uint8_t *tag); #ifdef COUPLING_DEC static uint8_t coupling_channel_element(NeAACDecStruct *hDecoder, bitfile *ld); #endif static uint16_t data_stream_element(NeAACDecStruct *hDecoder, bitfile *ld); static uint8_t program_config_element(program_config *pce, bitfile *ld); static uint8_t fill_element(NeAACDecStruct *hDecoder, bitfile *ld, drc_info *drc #ifdef SBR_DEC ,uint8_t sbr_ele #endif ); static uint8_t individual_channel_stream(NeAACDecStruct *hDecoder, element *ele, bitfile *ld, ic_stream *ics, uint8_t scal_flag, int16_t *spec_data); static uint8_t ics_info(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld, uint8_t common_window); static uint8_t section_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld); static uint8_t scale_factor_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld); #ifdef SSR_DEC static void gain_control_data(bitfile *ld, ic_stream *ics); #endif static uint8_t spectral_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld, int16_t *spectral_data); static uint16_t extension_payload(bitfile *ld, drc_info *drc, uint16_t count); static uint8_t pulse_data(ic_stream *ics, pulse_info *pul, bitfile *ld); static void tns_data(ic_stream *ics, tns_info *tns, bitfile *ld); #ifdef LTP_DEC static uint8_t ltp_data(NeAACDecStruct *hDecoder, ic_stream *ics, ltp_info *ltp, bitfile *ld); #endif static uint8_t 
adts_fixed_header(adts_header *adts, bitfile *ld); static void adts_variable_header(adts_header *adts, bitfile *ld); static void adts_error_check(adts_header *adts, bitfile *ld); static uint8_t dynamic_range_info(bitfile *ld, drc_info *drc); static uint8_t excluded_channels(bitfile *ld, drc_info *drc); static uint8_t side_info(NeAACDecStruct *hDecoder, element *ele, bitfile *ld, ic_stream *ics, uint8_t scal_flag); #ifdef DRM static int8_t DRM_aac_scalable_main_header(NeAACDecStruct *hDecoder, ic_stream *ics1, ic_stream *ics2, bitfile *ld, uint8_t this_layer_stereo); #endif /* Table 4.4.1 */ int8_t GASpecificConfig(bitfile *ld, mp4AudioSpecificConfig *mp4ASC, program_config *pce_out) { program_config pce; /* 1024 or 960 */ mp4ASC->frameLengthFlag = faad_get1bit(ld DEBUGVAR(1,138,"GASpecificConfig(): FrameLengthFlag")); #ifndef ALLOW_SMALL_FRAMELENGTH if (mp4ASC->frameLengthFlag == 1) return -3; #endif mp4ASC->dependsOnCoreCoder = faad_get1bit(ld DEBUGVAR(1,139,"GASpecificConfig(): DependsOnCoreCoder")); if (mp4ASC->dependsOnCoreCoder == 1) { mp4ASC->coreCoderDelay = (uint16_t)faad_getbits(ld, 14 DEBUGVAR(1,140,"GASpecificConfig(): CoreCoderDelay")); } mp4ASC->extensionFlag = faad_get1bit(ld DEBUGVAR(1,141,"GASpecificConfig(): ExtensionFlag")); if (mp4ASC->channelsConfiguration == 0) { if (program_config_element(&pce, ld)) return -3; //mp4ASC->channelsConfiguration = pce.channels; if (pce_out != NULL) memcpy(pce_out, &pce, sizeof(program_config)); /* if (pce.num_valid_cc_elements) return -3; */ } #ifdef ERROR_RESILIENCE if (mp4ASC->extensionFlag == 1) { /* Error resilience not supported yet */ if (mp4ASC->objectTypeIndex >= ER_OBJECT_START) { mp4ASC->aacSectionDataResilienceFlag = faad_get1bit(ld DEBUGVAR(1,144,"GASpecificConfig(): aacSectionDataResilienceFlag")); mp4ASC->aacScalefactorDataResilienceFlag = faad_get1bit(ld DEBUGVAR(1,145,"GASpecificConfig(): aacScalefactorDataResilienceFlag")); mp4ASC->aacSpectralDataResilienceFlag = faad_get1bit(ld DEBUGVAR(1,146,"GASpecificConfig(): aacSpectralDataResilienceFlag")); } /* 1 bit: extensionFlag3 */ faad_getbits(ld, 1); } #endif return 0; } /* Table 4.4.2 */ /* An MPEG-4 Audio decoder is only required to follow the Program Configuration Element in GASpecificConfig(). The decoder shall ignore any Program Configuration Elements that may occur in raw data blocks. PCEs transmitted in raw data blocks cannot be used to convey decoder configuration information. 
*/ static uint8_t program_config_element(program_config *pce, bitfile *ld) { uint8_t i; memset(pce, 0, sizeof(program_config)); pce->channels = 0; pce->element_instance_tag = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,10,"program_config_element(): element_instance_tag")); pce->object_type = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,11,"program_config_element(): object_type")); pce->sf_index = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,12,"program_config_element(): sf_index")); pce->num_front_channel_elements = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,13,"program_config_element(): num_front_channel_elements")); pce->num_side_channel_elements = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,14,"program_config_element(): num_side_channel_elements")); pce->num_back_channel_elements = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,15,"program_config_element(): num_back_channel_elements")); pce->num_lfe_channel_elements = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,16,"program_config_element(): num_lfe_channel_elements")); pce->num_assoc_data_elements = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,17,"program_config_element(): num_assoc_data_elements")); pce->num_valid_cc_elements = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,18,"program_config_element(): num_valid_cc_elements")); pce->mono_mixdown_present = faad_get1bit(ld DEBUGVAR(1,19,"program_config_element(): mono_mixdown_present")); if (pce->mono_mixdown_present == 1) { pce->mono_mixdown_element_number = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,20,"program_config_element(): mono_mixdown_element_number")); } pce->stereo_mixdown_present = faad_get1bit(ld DEBUGVAR(1,21,"program_config_element(): stereo_mixdown_present")); if (pce->stereo_mixdown_present == 1) { pce->stereo_mixdown_element_number = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,22,"program_config_element(): stereo_mixdown_element_number")); } pce->matrix_mixdown_idx_present = faad_get1bit(ld DEBUGVAR(1,23,"program_config_element(): matrix_mixdown_idx_present")); if (pce->matrix_mixdown_idx_present == 1) { pce->matrix_mixdown_idx = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,24,"program_config_element(): matrix_mixdown_idx")); pce->pseudo_surround_enable = faad_get1bit(ld DEBUGVAR(1,25,"program_config_element(): pseudo_surround_enable")); } for (i = 0; i < pce->num_front_channel_elements; i++) { pce->front_element_is_cpe[i] = faad_get1bit(ld DEBUGVAR(1,26,"program_config_element(): front_element_is_cpe")); pce->front_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,27,"program_config_element(): front_element_tag_select")); if (pce->front_element_is_cpe[i] & 1) { pce->cpe_channel[pce->front_element_tag_select[i]] = pce->channels; pce->num_front_channels += 2; pce->channels += 2; } else { pce->sce_channel[pce->front_element_tag_select[i]] = pce->channels; pce->num_front_channels++; pce->channels++; } } for (i = 0; i < pce->num_side_channel_elements; i++) { pce->side_element_is_cpe[i] = faad_get1bit(ld DEBUGVAR(1,28,"program_config_element(): side_element_is_cpe")); pce->side_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,29,"program_config_element(): side_element_tag_select")); if (pce->side_element_is_cpe[i] & 1) { pce->cpe_channel[pce->side_element_tag_select[i]] = pce->channels; pce->num_side_channels += 2; pce->channels += 2; } else { pce->sce_channel[pce->side_element_tag_select[i]] = pce->channels; pce->num_side_channels++; pce->channels++; } } for (i = 0; i < pce->num_back_channel_elements; i++) { pce->back_element_is_cpe[i] = faad_get1bit(ld DEBUGVAR(1,30,"program_config_element(): 
back_element_is_cpe")); pce->back_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,31,"program_config_element(): back_element_tag_select")); if (pce->back_element_is_cpe[i] & 1) { pce->cpe_channel[pce->back_element_tag_select[i]] = pce->channels; pce->channels += 2; pce->num_back_channels += 2; } else { pce->sce_channel[pce->back_element_tag_select[i]] = pce->channels; pce->num_back_channels++; pce->channels++; } } for (i = 0; i < pce->num_lfe_channel_elements; i++) { pce->lfe_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,32,"program_config_element(): lfe_element_tag_select")); pce->sce_channel[pce->lfe_element_tag_select[i]] = pce->channels; pce->num_lfe_channels++; pce->channels++; } for (i = 0; i < pce->num_assoc_data_elements; i++) pce->assoc_data_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,33,"program_config_element(): assoc_data_element_tag_select")); for (i = 0; i < pce->num_valid_cc_elements; i++) { pce->cc_element_is_ind_sw[i] = faad_get1bit(ld DEBUGVAR(1,34,"program_config_element(): cc_element_is_ind_sw")); pce->valid_cc_element_tag_select[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,35,"program_config_element(): valid_cc_element_tag_select")); } faad_byte_align(ld); pce->comment_field_bytes = (uint8_t)faad_getbits(ld, 8 DEBUGVAR(1,36,"program_config_element(): comment_field_bytes")); for (i = 0; i < pce->comment_field_bytes; i++) { pce->comment_field_data[i] = (uint8_t)faad_getbits(ld, 8 DEBUGVAR(1,37,"program_config_element(): comment_field_data")); } pce->comment_field_data[i] = 0; if (pce->channels > MAX_CHANNELS) return 22; return 0; } static void decode_sce_lfe(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, uint8_t id_syn_ele) { uint8_t channels = hDecoder->fr_channels; uint8_t tag = 0; if (channels+1 > MAX_CHANNELS) { hInfo->error = 12; return; } if (hDecoder->fr_ch_ele+1 > MAX_SYNTAX_ELEMENTS) { hInfo->error = 13; return; } /* for SCE hDecoder->element_output_channels[] is not set here because this can become 2 when some form of Parametric Stereo coding is used */ if (hDecoder->frame && hDecoder->element_id[hDecoder->fr_ch_ele] != id_syn_ele) { /* element inconsistency */ hInfo->error = 21; return; } /* save the syntax element id */ hDecoder->element_id[hDecoder->fr_ch_ele] = id_syn_ele; /* decode the element */ hInfo->error = single_lfe_channel_element(hDecoder, ld, channels, &tag); /* map output channels position to internal data channels */ if (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 2) { /* this might be faulty when pce_set is true */ hDecoder->internal_channel[channels] = channels; hDecoder->internal_channel[channels+1] = channels+1; } else { if (hDecoder->pce_set) hDecoder->internal_channel[hDecoder->pce.sce_channel[tag]] = channels; else hDecoder->internal_channel[channels] = channels; } hDecoder->fr_channels += hDecoder->element_output_channels[hDecoder->fr_ch_ele]; hDecoder->fr_ch_ele++; } static void decode_cpe(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, uint8_t id_syn_ele) { uint8_t channels = hDecoder->fr_channels; uint8_t tag = 0; if (channels+2 > MAX_CHANNELS) { hInfo->error = 12; return; } if (hDecoder->fr_ch_ele+1 > MAX_SYNTAX_ELEMENTS) { hInfo->error = 13; return; } /* for CPE the number of output channels is always 2 */ if (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 0) { /* element_output_channels not set yet */ hDecoder->element_output_channels[hDecoder->fr_ch_ele] = 2; } else if 
(hDecoder->element_output_channels[hDecoder->fr_ch_ele] != 2) { /* element inconsistency */ hInfo->error = 21; return; } if (hDecoder->frame && hDecoder->element_id[hDecoder->fr_ch_ele] != id_syn_ele) { /* element inconsistency */ hInfo->error = 21; return; } /* save the syntax element id */ hDecoder->element_id[hDecoder->fr_ch_ele] = id_syn_ele; /* decode the element */ hInfo->error = channel_pair_element(hDecoder, ld, channels, &tag); /* map output channel position to internal data channels */ if (hDecoder->pce_set) { hDecoder->internal_channel[hDecoder->pce.cpe_channel[tag]] = channels; hDecoder->internal_channel[hDecoder->pce.cpe_channel[tag]+1] = channels+1; } else { hDecoder->internal_channel[channels] = channels; hDecoder->internal_channel[channels+1] = channels+1; } hDecoder->fr_channels += 2; hDecoder->fr_ch_ele++; } void raw_data_block(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, program_config *pce, drc_info *drc) { uint8_t id_syn_ele; uint8_t ele_this_frame = 0; hDecoder->fr_channels = 0; hDecoder->fr_ch_ele = 0; hDecoder->first_syn_ele = 25; hDecoder->has_lfe = 0; #ifdef ERROR_RESILIENCE if (hDecoder->object_type < ER_OBJECT_START) { #endif /* Table 4.4.3: raw_data_block() */ while ((id_syn_ele = (uint8_t)faad_getbits(ld, LEN_SE_ID DEBUGVAR(1,4,"NeAACDecDecode(): id_syn_ele"))) != ID_END) { switch (id_syn_ele) { case ID_SCE: ele_this_frame++; if (hDecoder->first_syn_ele == 25) hDecoder->first_syn_ele = id_syn_ele; decode_sce_lfe(hDecoder, hInfo, ld, id_syn_ele); if (hInfo->error > 0) return; break; case ID_CPE: ele_this_frame++; if (hDecoder->first_syn_ele == 25) hDecoder->first_syn_ele = id_syn_ele; decode_cpe(hDecoder, hInfo, ld, id_syn_ele); if (hInfo->error > 0) return; break; case ID_LFE: #ifdef DRM hInfo->error = 32; #else ele_this_frame++; hDecoder->has_lfe++; decode_sce_lfe(hDecoder, hInfo, ld, id_syn_ele); #endif if (hInfo->error > 0) return; break; case ID_CCE: /* not implemented yet, but skip the bits */ #ifdef DRM hInfo->error = 32; #else ele_this_frame++; #ifdef COUPLING_DEC hInfo->error = coupling_channel_element(hDecoder, ld); #else hInfo->error = 6; #endif #endif if (hInfo->error > 0) return; break; case ID_DSE: ele_this_frame++; data_stream_element(hDecoder, ld); break; case ID_PCE: if (ele_this_frame != 0) { hInfo->error = 31; return; } ele_this_frame++; /* 14496-4: 5.6.4.1.2.1.3: */ /* program_configuration_element()'s in access units shall be ignored */ program_config_element(pce, ld); //if ((hInfo->error = program_config_element(pce, ld)) > 0) // return; //hDecoder->pce_set = 1; break; case ID_FIL: ele_this_frame++; /* one sbr_info describes a channel_element not a channel! 
*/ /* if we encounter SBR data here: error */ /* SBR data will be read directly in the SCE/LFE/CPE element */ if ((hInfo->error = fill_element(hDecoder, ld, drc #ifdef SBR_DEC , INVALID_SBR_ELEMENT #endif )) > 0) return; break; } } #ifdef ERROR_RESILIENCE } else { /* Table 262: er_raw_data_block() */ switch (hDecoder->channelConfiguration) { case 1: decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); if (hInfo->error > 0) return; break; case 2: decode_cpe(hDecoder, hInfo, ld, ID_CPE); if (hInfo->error > 0) return; break; case 3: decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); if (hInfo->error > 0) return; break; case 4: decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); if (hInfo->error > 0) return; break; case 5: decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); if (hInfo->error > 0) return; break; case 6: decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_sce_lfe(hDecoder, hInfo, ld, ID_LFE); if (hInfo->error > 0) return; break; case 7: /* 8 channels */ decode_sce_lfe(hDecoder, hInfo, ld, ID_SCE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_cpe(hDecoder, hInfo, ld, ID_CPE); decode_sce_lfe(hDecoder, hInfo, ld, ID_LFE); if (hInfo->error > 0) return; break; default: hInfo->error = 7; return; } #if 0 cnt = bits_to_decode() / 8; while (cnt >= 1) { cnt -= extension_payload(cnt); } #endif } #endif /* new in corrigendum 14496-3:2002 */ #ifdef DRM if (hDecoder->object_type != DRM_ER_LC #if 0 && !hDecoder->latm_header_present #endif ) #endif { faad_byte_align(ld); } return; } /* Table 4.4.4 and */ /* Table 4.4.9 */ static uint8_t single_lfe_channel_element(NeAACDecStruct *hDecoder, bitfile *ld, uint8_t channel, uint8_t *tag) { uint8_t retval = 0; element sce = {0}; ic_stream *ics = &(sce.ics1); ALIGN int16_t spec_data[1024] = {0}; sce.element_instance_tag = (uint8_t)faad_getbits(ld, LEN_TAG DEBUGVAR(1,38,"single_lfe_channel_element(): element_instance_tag")); *tag = sce.element_instance_tag; sce.channel = channel; sce.paired_channel = -1; retval = individual_channel_stream(hDecoder, &sce, ld, ics, 0, spec_data); if (retval > 0) return retval; /* IS not allowed in single channel */ if (ics->is_used) return 32; #ifdef SBR_DEC /* check if next bitstream element is a fill element */ /* if so, read it now so SBR decoding can be done in case of a file with SBR */ if (faad_showbits(ld, LEN_SE_ID) == ID_FIL) { faad_flushbits(ld, LEN_SE_ID); /* one sbr_info describes a channel_element not a channel! 
*/ if ((retval = fill_element(hDecoder, ld, hDecoder->drc, hDecoder->fr_ch_ele)) > 0) { return retval; } } #endif /* noiseless coding is done, spectral reconstruction is done now */ retval = reconstruct_single_channel(hDecoder, ics, &sce, spec_data); if (retval > 0) return retval; return 0; } /* Table 4.4.5 */ static uint8_t channel_pair_element(NeAACDecStruct *hDecoder, bitfile *ld, uint8_t channels, uint8_t *tag) { ALIGN int16_t spec_data1[1024] = {0}; ALIGN int16_t spec_data2[1024] = {0}; element cpe = {0}; ic_stream *ics1 = &(cpe.ics1); ic_stream *ics2 = &(cpe.ics2); uint8_t result; cpe.channel = channels; cpe.paired_channel = channels+1; cpe.element_instance_tag = (uint8_t)faad_getbits(ld, LEN_TAG DEBUGVAR(1,39,"channel_pair_element(): element_instance_tag")); *tag = cpe.element_instance_tag; if ((cpe.common_window = faad_get1bit(ld DEBUGVAR(1,40,"channel_pair_element(): common_window"))) & 1) { /* both channels have common ics information */ if ((result = ics_info(hDecoder, ics1, ld, cpe.common_window)) > 0) return result; ics1->ms_mask_present = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,41,"channel_pair_element(): ms_mask_present")); if (ics1->ms_mask_present == 3) { /* bitstream error */ return 32; } if (ics1->ms_mask_present == 1) { uint8_t g, sfb; for (g = 0; g < ics1->num_window_groups; g++) { for (sfb = 0; sfb < ics1->max_sfb; sfb++) { ics1->ms_used[g][sfb] = faad_get1bit(ld DEBUGVAR(1,42,"channel_pair_element(): faad_get1bit")); } } } #ifdef ERROR_RESILIENCE if ((hDecoder->object_type >= ER_OBJECT_START) && (ics1->predictor_data_present)) { if (( #ifdef LTP_DEC ics1->ltp.data_present = #endif faad_get1bit(ld DEBUGVAR(1,50,"channel_pair_element(): ltp.data_present"))) & 1) { #ifdef LTP_DEC if ((result = ltp_data(hDecoder, ics1, &(ics1->ltp), ld)) > 0) { return result; } #else return 26; #endif } } #endif memcpy(ics2, ics1, sizeof(ic_stream)); } else { ics1->ms_mask_present = 0; } if ((result = individual_channel_stream(hDecoder, &cpe, ld, ics1, 0, spec_data1)) > 0) { return result; } #ifdef ERROR_RESILIENCE if (cpe.common_window && (hDecoder->object_type >= ER_OBJECT_START) && (ics1->predictor_data_present)) { if (( #ifdef LTP_DEC ics1->ltp2.data_present = #endif faad_get1bit(ld DEBUGVAR(1,50,"channel_pair_element(): ltp.data_present"))) & 1) { #ifdef LTP_DEC if ((result = ltp_data(hDecoder, ics1, &(ics1->ltp2), ld)) > 0) { return result; } #else return 26; #endif } } #endif if ((result = individual_channel_stream(hDecoder, &cpe, ld, ics2, 0, spec_data2)) > 0) { return result; } #ifdef SBR_DEC /* check if next bitstream element is a fill element */ /* if so, read it now so SBR decoding can be done in case of a file with SBR */ if (faad_showbits(ld, LEN_SE_ID) == ID_FIL) { faad_flushbits(ld, LEN_SE_ID); /* one sbr_info describes a channel_element not a channel! 
*/ if ((result = fill_element(hDecoder, ld, hDecoder->drc, hDecoder->fr_ch_ele)) > 0) { return result; } } #endif /* noiseless coding is done, spectral reconstruction is done now */ if ((result = reconstruct_channel_pair(hDecoder, ics1, ics2, &cpe, spec_data1, spec_data2)) > 0) { return result; } return 0; } /* Table 4.4.6 */ static uint8_t ics_info(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld, uint8_t common_window) { uint8_t retval = 0; uint8_t ics_reserved_bit; ics_reserved_bit = faad_get1bit(ld DEBUGVAR(1,43,"ics_info(): ics_reserved_bit")); if (ics_reserved_bit != 0) return 32; ics->window_sequence = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,44,"ics_info(): window_sequence")); ics->window_shape = faad_get1bit(ld DEBUGVAR(1,45,"ics_info(): window_shape")); #ifdef LD_DEC /* No block switching in LD */ if ((hDecoder->object_type == LD) && (ics->window_sequence != ONLY_LONG_SEQUENCE)) return 32; #endif if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { ics->max_sfb = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,46,"ics_info(): max_sfb (short)")); ics->scale_factor_grouping = (uint8_t)faad_getbits(ld, 7 DEBUGVAR(1,47,"ics_info(): scale_factor_grouping")); } else { ics->max_sfb = (uint8_t)faad_getbits(ld, 6 DEBUGVAR(1,48,"ics_info(): max_sfb (long)")); } /* get the grouping information */ if ((retval = window_grouping_info(hDecoder, ics)) > 0) return retval; /* should be an error */ /* check the range of max_sfb */ if (ics->max_sfb > ics->num_swb) return 16; if (ics->window_sequence != EIGHT_SHORT_SEQUENCE) { if ((ics->predictor_data_present = faad_get1bit(ld DEBUGVAR(1,49,"ics_info(): predictor_data_present"))) & 1) { if (hDecoder->object_type == MAIN) /* MPEG2 style AAC predictor */ { uint8_t sfb; uint8_t limit = min(ics->max_sfb, max_pred_sfb(hDecoder->sf_index)); #ifdef MAIN_DEC ics->pred.limit = limit; #endif if (( #ifdef MAIN_DEC ics->pred.predictor_reset = #endif faad_get1bit(ld DEBUGVAR(1,53,"ics_info(): pred.predictor_reset"))) & 1) { #ifdef MAIN_DEC ics->pred.predictor_reset_group_number = #endif (uint8_t)faad_getbits(ld, 5 DEBUGVAR(1,54,"ics_info(): pred.predictor_reset_group_number")); } for (sfb = 0; sfb < limit; sfb++) { #ifdef MAIN_DEC ics->pred.prediction_used[sfb] = #endif faad_get1bit(ld DEBUGVAR(1,55,"ics_info(): pred.prediction_used")); } } #ifdef LTP_DEC else { /* Long Term Prediction */ if (hDecoder->object_type < ER_OBJECT_START) { if ((ics->ltp.data_present = faad_get1bit(ld DEBUGVAR(1,50,"ics_info(): ltp.data_present"))) & 1) { if ((retval = ltp_data(hDecoder, ics, &(ics->ltp), ld)) > 0) { return retval; } } if (common_window) { if ((ics->ltp2.data_present = faad_get1bit(ld DEBUGVAR(1,51,"ics_info(): ltp2.data_present"))) & 1) { if ((retval = ltp_data(hDecoder, ics, &(ics->ltp2), ld)) > 0) { return retval; } } } } #ifdef ERROR_RESILIENCE if (!common_window && (hDecoder->object_type >= ER_OBJECT_START)) { if ((ics->ltp.data_present = faad_get1bit(ld DEBUGVAR(1,50,"ics_info(): ltp.data_present"))) & 1) { ltp_data(hDecoder, ics, &(ics->ltp), ld); } } #endif } #endif } } return retval; } /* Table 4.4.7 */ static uint8_t pulse_data(ic_stream *ics, pulse_info *pul, bitfile *ld) { uint8_t i; pul->number_pulse = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,56,"pulse_data(): number_pulse")); pul->pulse_start_sfb = (uint8_t)faad_getbits(ld, 6 DEBUGVAR(1,57,"pulse_data(): pulse_start_sfb")); /* check the range of pulse_start_sfb */ if (pul->pulse_start_sfb > ics->num_swb) return 16; for (i = 0; i < pul->number_pulse+1; i++) { pul->pulse_offset[i] = (uint8_t)faad_getbits(ld, 5 
DEBUGVAR(1,58,"pulse_data(): pulse_offset")); #if 0 printf("%d\n", pul->pulse_offset[i]); #endif pul->pulse_amp[i] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,59,"pulse_data(): pulse_amp")); #if 0 printf("%d\n", pul->pulse_amp[i]); #endif } return 0; } #ifdef COUPLING_DEC /* Table 4.4.8: Currently just for skipping the bits... */ static uint8_t coupling_channel_element(NeAACDecStruct *hDecoder, bitfile *ld) { uint8_t c, result = 0; uint8_t ind_sw_cce_flag = 0; uint8_t num_gain_element_lists = 0; uint8_t num_coupled_elements = 0; element el_empty = {0}; ic_stream ics_empty = {0}; int16_t sh_data[1024]; c = faad_getbits(ld, LEN_TAG DEBUGVAR(1,900,"coupling_channel_element(): element_instance_tag")); ind_sw_cce_flag = faad_get1bit(ld DEBUGVAR(1,901,"coupling_channel_element(): ind_sw_cce_flag")); num_coupled_elements = faad_getbits(ld, 3 DEBUGVAR(1,902,"coupling_channel_element(): num_coupled_elements")); for (c = 0; c < num_coupled_elements + 1; c++) { uint8_t cc_target_is_cpe, cc_target_tag_select; num_gain_element_lists++; cc_target_is_cpe = faad_get1bit(ld DEBUGVAR(1,903,"coupling_channel_element(): cc_target_is_cpe")); cc_target_tag_select = faad_getbits(ld, 4 DEBUGVAR(1,904,"coupling_channel_element(): cc_target_tag_select")); if (cc_target_is_cpe) { uint8_t cc_l = faad_get1bit(ld DEBUGVAR(1,905,"coupling_channel_element(): cc_l")); uint8_t cc_r = faad_get1bit(ld DEBUGVAR(1,906,"coupling_channel_element(): cc_r")); if (cc_l && cc_r) num_gain_element_lists++; } } faad_get1bit(ld DEBUGVAR(1,907,"coupling_channel_element(): cc_domain")); faad_get1bit(ld DEBUGVAR(1,908,"coupling_channel_element(): gain_element_sign")); faad_getbits(ld, 2 DEBUGVAR(1,909,"coupling_channel_element(): gain_element_scale")); if ((result = individual_channel_stream(hDecoder, &el_empty, ld, &ics_empty, 0, sh_data)) > 0) { return result; } /* IS not allowed in single channel */ if (ics->is_used) return 32; for (c = 1; c < num_gain_element_lists; c++) { uint8_t cge; if (ind_sw_cce_flag) { cge = 1; } else { cge = faad_get1bit(ld DEBUGVAR(1,910,"coupling_channel_element(): common_gain_element_present")); } if (cge) { huffman_scale_factor(ld); } else { uint8_t g, sfb; for (g = 0; g < ics_empty.num_window_groups; g++) { for (sfb = 0; sfb < ics_empty.max_sfb; sfb++) { if (ics_empty.sfb_cb[g][sfb] != ZERO_HCB) huffman_scale_factor(ld); } } } } return 0; } #endif /* Table 4.4.10 */ static uint16_t data_stream_element(NeAACDecStruct *hDecoder, bitfile *ld) { uint8_t byte_aligned; uint16_t i, count; /* element_instance_tag = */ faad_getbits(ld, LEN_TAG DEBUGVAR(1,60,"data_stream_element(): element_instance_tag")); byte_aligned = faad_get1bit(ld DEBUGVAR(1,61,"data_stream_element(): byte_aligned")); count = (uint16_t)faad_getbits(ld, 8 DEBUGVAR(1,62,"data_stream_element(): count")); if (count == 255) { count += (uint16_t)faad_getbits(ld, 8 DEBUGVAR(1,63,"data_stream_element(): extra count")); } if (byte_aligned) faad_byte_align(ld); for (i = 0; i < count; i++) { faad_getbits(ld, LEN_BYTE DEBUGVAR(1,64,"data_stream_element(): data_stream_byte")); } return count; } /* Table 4.4.11 */ static uint8_t fill_element(NeAACDecStruct *hDecoder, bitfile *ld, drc_info *drc #ifdef SBR_DEC ,uint8_t sbr_ele #endif ) { uint16_t count; #ifdef SBR_DEC uint8_t bs_extension_type; #endif count = (uint16_t)faad_getbits(ld, 4 DEBUGVAR(1,65,"fill_element(): count")); if (count == 15) { count += (uint16_t)faad_getbits(ld, 8 DEBUGVAR(1,66,"fill_element(): extra count")) - 1; } if (count > 0) { #ifdef SBR_DEC bs_extension_type = (uint8_t)faad_showbits(ld, 
4); if ((bs_extension_type == EXT_SBR_DATA) || (bs_extension_type == EXT_SBR_DATA_CRC)) { if (sbr_ele == INVALID_SBR_ELEMENT) return 24; if (!hDecoder->sbr[sbr_ele]) { hDecoder->sbr[sbr_ele] = sbrDecodeInit(hDecoder->frameLength, hDecoder->element_id[sbr_ele], 2*get_sample_rate(hDecoder->sf_index), hDecoder->downSampledSBR #ifdef DRM , 0 #endif ); } hDecoder->sbr_present_flag = 1; /* parse the SBR data */ hDecoder->sbr[sbr_ele]->ret = sbr_extension_data(ld, hDecoder->sbr[sbr_ele], count, hDecoder->postSeekResetFlag); #if 0 if (hDecoder->sbr[sbr_ele]->ret > 0) { printf("%s\n", NeAACDecGetErrorMessage(hDecoder->sbr[sbr_ele]->ret)); } #endif #if (defined(PS_DEC) || defined(DRM_PS)) if (hDecoder->sbr[sbr_ele]->ps_used) { hDecoder->ps_used[sbr_ele] = 1; /* set element independent flag to 1 as well */ hDecoder->ps_used_global = 1; } #endif } else { #endif #ifndef DRM while (count > 0) { count -= extension_payload(ld, drc, count); } #else return 30; #endif #ifdef SBR_DEC } #endif } return 0; } /* Table 4.4.12 */ #ifdef SSR_DEC static void gain_control_data(bitfile *ld, ic_stream *ics) { uint8_t bd, wd, ad; ssr_info *ssr = &(ics->ssr); ssr->max_band = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,1000,"gain_control_data(): max_band")); if (ics->window_sequence == ONLY_LONG_SEQUENCE) { for (bd = 1; bd <= ssr->max_band; bd++) { for (wd = 0; wd < 1; wd++) { ssr->adjust_num[bd][wd] = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,1001,"gain_control_data(): adjust_num")); for (ad = 0; ad < ssr->adjust_num[bd][wd]; ad++) { ssr->alevcode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1002,"gain_control_data(): alevcode")); ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 5 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } } } } else if (ics->window_sequence == LONG_START_SEQUENCE) { for (bd = 1; bd <= ssr->max_band; bd++) { for (wd = 0; wd < 2; wd++) { ssr->adjust_num[bd][wd] = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,1001,"gain_control_data(): adjust_num")); for (ad = 0; ad < ssr->adjust_num[bd][wd]; ad++) { ssr->alevcode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1002,"gain_control_data(): alevcode")); if (wd == 0) { ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } else { ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } } } } } else if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { for (bd = 1; bd <= ssr->max_band; bd++) { for (wd = 0; wd < 8; wd++) { ssr->adjust_num[bd][wd] = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,1001,"gain_control_data(): adjust_num")); for (ad = 0; ad < ssr->adjust_num[bd][wd]; ad++) { ssr->alevcode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1002,"gain_control_data(): alevcode")); ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } } } } else if (ics->window_sequence == LONG_STOP_SEQUENCE) { for (bd = 1; bd <= ssr->max_band; bd++) { for (wd = 0; wd < 2; wd++) { ssr->adjust_num[bd][wd] = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,1001,"gain_control_data(): adjust_num")); for (ad = 0; ad < ssr->adjust_num[bd][wd]; ad++) { ssr->alevcode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1002,"gain_control_data(): alevcode")); if (wd == 0) { ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } else { ssr->aloccode[bd][wd][ad] = (uint8_t)faad_getbits(ld, 5 DEBUGVAR(1,1003,"gain_control_data(): aloccode")); } } } } } } #endif #ifdef DRM /* 
Table 4.4.13 ASME */ void DRM_aac_scalable_main_element(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, program_config *pce, drc_info *drc) { uint8_t retval = 0; uint8_t channels = hDecoder->fr_channels = 0; uint8_t ch; uint8_t this_layer_stereo = (hDecoder->channelConfiguration > 1) ? 1 : 0; element cpe = {0}; ic_stream *ics1 = &(cpe.ics1); ic_stream *ics2 = &(cpe.ics2); int16_t *spec_data; ALIGN int16_t spec_data1[1024] = {0}; ALIGN int16_t spec_data2[1024] = {0}; hDecoder->fr_ch_ele = 0; hInfo->error = DRM_aac_scalable_main_header(hDecoder, ics1, ics2, ld, this_layer_stereo); if (hInfo->error > 0) return; cpe.common_window = 1; if (this_layer_stereo) { hDecoder->element_id[0] = ID_CPE; if (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 0) hDecoder->element_output_channels[hDecoder->fr_ch_ele] = 2; } else { hDecoder->element_id[0] = ID_SCE; } if (this_layer_stereo) { cpe.channel = 0; cpe.paired_channel = 1; } /* Stereo2 / Mono1 */ ics1->tns_data_present = faad_get1bit(ld); #if defined(LTP_DEC) ics1->ltp.data_present = faad_get1bit(ld); #elif defined (DRM) if(faad_get1bit(ld)) { hInfo->error = 26; return; } #else faad_get1bit(ld); #endif hInfo->error = side_info(hDecoder, &cpe, ld, ics1, 1); if (hInfo->error > 0) return; if (this_layer_stereo) { /* Stereo3 */ ics2->tns_data_present = faad_get1bit(ld); #ifdef LTP_DEC ics1->ltp.data_present = #endif faad_get1bit(ld); hInfo->error = side_info(hDecoder, &cpe, ld, ics2, 1); if (hInfo->error > 0) return; } /* Stereo4 / Mono2 */ if (ics1->tns_data_present) tns_data(ics1, &(ics1->tns), ld); if (this_layer_stereo) { /* Stereo5 */ if (ics2->tns_data_present) tns_data(ics2, &(ics2->tns), ld); } #ifdef DRM /* CRC check */ if (hDecoder->object_type == DRM_ER_LC) { if ((hInfo->error = (uint8_t)faad_check_CRC(ld, (uint16_t)faad_get_processed_bits(ld) - 8)) > 0) return; } #endif /* Stereo6 / Mono3 */ /* error resilient spectral data decoding */ if ((hInfo->error = reordered_spectral_data(hDecoder, ics1, ld, spec_data1)) > 0) { return; } if (this_layer_stereo) { /* Stereo7 */ /* error resilient spectral data decoding */ if ((hInfo->error = reordered_spectral_data(hDecoder, ics2, ld, spec_data2)) > 0) { return; } } #ifdef DRM #ifdef SBR_DEC /* In case of DRM we need to read the SBR info before channel reconstruction */ if ((hDecoder->sbr_present_flag == 1) && (hDecoder->object_type == DRM_ER_LC)) { bitfile ld_sbr = {0}; uint32_t i; uint16_t count = 0; uint8_t *revbuffer; uint8_t *prevbufstart; uint8_t *pbufend; /* all forward bitreading should be finished at this point */ uint32_t bitsconsumed = faad_get_processed_bits(ld); uint32_t buffer_size = faad_origbitbuffer_size(ld); uint8_t *buffer = (uint8_t*)faad_origbitbuffer(ld); if (bitsconsumed + 8 > buffer_size*8) { hInfo->error = 14; return; } if (!hDecoder->sbr[0]) { hDecoder->sbr[0] = sbrDecodeInit(hDecoder->frameLength, hDecoder->element_id[0], 2*get_sample_rate(hDecoder->sf_index), 0 /* ds SBR */, 1); } /* Reverse bit reading of SBR data in DRM audio frame */ revbuffer = (uint8_t*)faad_malloc(buffer_size*sizeof(uint8_t)); prevbufstart = revbuffer; pbufend = &buffer[buffer_size - 1]; for (i = 0; i < buffer_size; i++) *prevbufstart++ = tabFlipbits[*pbufend--]; /* Set SBR data */ /* consider 8 bits from AAC-CRC */ /* SBR buffer size is original buffer size minus AAC buffer size */ count = (uint16_t)bit2byte(buffer_size*8 - bitsconsumed); faad_initbits(&ld_sbr, revbuffer, count); hDecoder->sbr[0]->sample_rate = get_sample_rate(hDecoder->sf_index); hDecoder->sbr[0]->sample_rate 
*= 2; faad_getbits(&ld_sbr, 8); /* Skip 8-bit CRC */ hDecoder->sbr[0]->ret = sbr_extension_data(&ld_sbr, hDecoder->sbr[0], count, hDecoder->postSeekResetFlag); #if (defined(PS_DEC) || defined(DRM_PS)) if (hDecoder->sbr[0]->ps_used) { hDecoder->ps_used[0] = 1; hDecoder->ps_used_global = 1; } #endif if (ld_sbr.error) { hDecoder->sbr[0]->ret = 1; } /* check CRC */ /* no need to check it if there was already an error */ if (hDecoder->sbr[0]->ret == 0) hDecoder->sbr[0]->ret = (uint8_t)faad_check_CRC(&ld_sbr, (uint16_t)faad_get_processed_bits(&ld_sbr) - 8); /* SBR data was corrupted, disable it until the next header */ if (hDecoder->sbr[0]->ret != 0) { hDecoder->sbr[0]->header_count = 0; } faad_endbits(&ld_sbr); if (revbuffer) faad_free(revbuffer); } #endif #endif if (this_layer_stereo) { hInfo->error = reconstruct_channel_pair(hDecoder, ics1, ics2, &cpe, spec_data1, spec_data2); if (hInfo->error > 0) return; } else { hInfo->error = reconstruct_single_channel(hDecoder, ics1, &cpe, spec_data1); if (hInfo->error > 0) return; } /* map output channels position to internal data channels */ if (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 2) { /* this might be faulty when pce_set is true */ hDecoder->internal_channel[channels] = channels; hDecoder->internal_channel[channels+1] = channels+1; } else { hDecoder->internal_channel[channels] = channels; } hDecoder->fr_channels += hDecoder->element_output_channels[hDecoder->fr_ch_ele]; hDecoder->fr_ch_ele++; return; } /* Table 4.4.15 */ static int8_t DRM_aac_scalable_main_header(NeAACDecStruct *hDecoder, ic_stream *ics1, ic_stream *ics2, bitfile *ld, uint8_t this_layer_stereo) { uint8_t retval = 0; uint8_t ch; ic_stream *ics; uint8_t ics_reserved_bit; ics_reserved_bit = faad_get1bit(ld DEBUGVAR(1,300,"aac_scalable_main_header(): ics_reserved_bits")); if (ics_reserved_bit != 0) return 32; ics1->window_sequence = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,301,"aac_scalable_main_header(): window_sequence")); ics1->window_shape = faad_get1bit(ld DEBUGVAR(1,302,"aac_scalable_main_header(): window_shape")); if (ics1->window_sequence == EIGHT_SHORT_SEQUENCE) { ics1->max_sfb = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,303,"aac_scalable_main_header(): max_sfb (short)")); ics1->scale_factor_grouping = (uint8_t)faad_getbits(ld, 7 DEBUGVAR(1,304,"aac_scalable_main_header(): scale_factor_grouping")); } else { ics1->max_sfb = (uint8_t)faad_getbits(ld, 6 DEBUGVAR(1,305,"aac_scalable_main_header(): max_sfb (long)")); } /* get the grouping information */ if ((retval = window_grouping_info(hDecoder, ics1)) > 0) return retval; /* should be an error */ /* check the range of max_sfb */ if (ics1->max_sfb > ics1->num_swb) return 16; if (this_layer_stereo) { ics1->ms_mask_present = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,306,"aac_scalable_main_header(): ms_mask_present")); if (ics1->ms_mask_present == 3) { /* bitstream error */ return 32; } if (ics1->ms_mask_present == 1) { uint8_t g, sfb; for (g = 0; g < ics1->num_window_groups; g++) { for (sfb = 0; sfb < ics1->max_sfb; sfb++) { ics1->ms_used[g][sfb] = faad_get1bit(ld DEBUGVAR(1,307,"aac_scalable_main_header(): faad_get1bit")); } } } memcpy(ics2, ics1, sizeof(ic_stream)); } else { ics1->ms_mask_present = 0; } return 0; } #endif static uint8_t side_info(NeAACDecStruct *hDecoder, element *ele, bitfile *ld, ic_stream *ics, uint8_t scal_flag) { uint8_t result; ics->global_gain = (uint8_t)faad_getbits(ld, 8 DEBUGVAR(1,67,"individual_channel_stream(): global_gain")); if (!ele->common_window && !scal_flag) { if ((result = 
ics_info(hDecoder, ics, ld, ele->common_window)) > 0) return result; } if ((result = section_data(hDecoder, ics, ld)) > 0) return result; if ((result = scale_factor_data(hDecoder, ics, ld)) > 0) return result; if (!scal_flag) { /** ** NOTE: It could be that pulse data is available in scalable AAC too, ** as said in Amendment 1, this could be only the case for ER AAC, ** though. (have to check this out later) **/ /* get pulse data */ if ((ics->pulse_data_present = faad_get1bit(ld DEBUGVAR(1,68,"individual_channel_stream(): pulse_data_present"))) & 1) { if ((result = pulse_data(ics, &(ics->pul), ld)) > 0) return result; } /* get tns data */ if ((ics->tns_data_present = faad_get1bit(ld DEBUGVAR(1,69,"individual_channel_stream(): tns_data_present"))) & 1) { #ifdef ERROR_RESILIENCE if (hDecoder->object_type < ER_OBJECT_START) #endif tns_data(ics, &(ics->tns), ld); } /* get gain control data */ if ((ics->gain_control_data_present = faad_get1bit(ld DEBUGVAR(1,70,"individual_channel_stream(): gain_control_data_present"))) & 1) { #ifdef SSR_DEC if (hDecoder->object_type != SSR) return 1; else gain_control_data(ld, ics); #else return 1; #endif } } #ifdef ERROR_RESILIENCE if (hDecoder->aacSpectralDataResilienceFlag) { ics->length_of_reordered_spectral_data = (uint16_t)faad_getbits(ld, 14 DEBUGVAR(1,147,"individual_channel_stream(): length_of_reordered_spectral_data")); if (hDecoder->channelConfiguration == 2) { if (ics->length_of_reordered_spectral_data > 6144) ics->length_of_reordered_spectral_data = 6144; } else { if (ics->length_of_reordered_spectral_data > 12288) ics->length_of_reordered_spectral_data = 12288; } ics->length_of_longest_codeword = (uint8_t)faad_getbits(ld, 6 DEBUGVAR(1,148,"individual_channel_stream(): length_of_longest_codeword")); if (ics->length_of_longest_codeword >= 49) ics->length_of_longest_codeword = 49; } /* RVLC spectral data is put here */ if (hDecoder->aacScalefactorDataResilienceFlag) { if ((result = rvlc_decode_scale_factors(ics, ld)) > 0) return result; } #endif return 0; } /* Table 4.4.24 */ static uint8_t individual_channel_stream(NeAACDecStruct *hDecoder, element *ele, bitfile *ld, ic_stream *ics, uint8_t scal_flag, int16_t *spec_data) { uint8_t result; result = side_info(hDecoder, ele, ld, ics, scal_flag); if (result > 0) return result; if (hDecoder->object_type >= ER_OBJECT_START) { if (ics->tns_data_present) tns_data(ics, &(ics->tns), ld); } #ifdef DRM /* CRC check */ if (hDecoder->object_type == DRM_ER_LC) { if ((result = (uint8_t)faad_check_CRC(ld, (uint16_t)faad_get_processed_bits(ld) - 8)) > 0) return result; } #endif #ifdef ERROR_RESILIENCE if (hDecoder->aacSpectralDataResilienceFlag) { /* error resilient spectral data decoding */ if ((result = reordered_spectral_data(hDecoder, ics, ld, spec_data)) > 0) { return result; } } else { #endif /* decode the spectral data */ if ((result = spectral_data(hDecoder, ics, ld, spec_data)) > 0) { return result; } #ifdef ERROR_RESILIENCE } #endif /* pulse coding reconstruction */ if (ics->pulse_data_present) { if (ics->window_sequence != EIGHT_SHORT_SEQUENCE) { if ((result = pulse_decode(ics, spec_data, hDecoder->frameLength)) > 0) return result; } else { return 2; /* pulse coding not allowed for short blocks */ } } return 0; } /* Table 4.4.25 */ static uint8_t section_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld) { uint8_t g; uint8_t sect_esc_val, sect_bits; if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) sect_bits = 3; else sect_bits = 5; sect_esc_val = (1<<sect_bits) - 1; #if 0 printf("\ntotal sfb 
%d\n", ics->max_sfb); printf(" sect top cb\n"); #endif for (g = 0; g < ics->num_window_groups; g++) { uint8_t k = 0; uint8_t i = 0; while (k < ics->max_sfb) { #ifdef ERROR_RESILIENCE uint8_t vcb11 = 0; #endif uint8_t sfb; uint8_t sect_len_incr; uint16_t sect_len = 0; uint8_t sect_cb_bits = 4; /* if "faad_getbits" detects error and returns "0", "k" is never incremented and we cannot leave the while loop */ if (ld->error != 0) return 14; #ifdef ERROR_RESILIENCE if (hDecoder->aacSectionDataResilienceFlag) sect_cb_bits = 5; #endif ics->sect_cb[g][i] = (uint8_t)faad_getbits(ld, sect_cb_bits DEBUGVAR(1,71,"section_data(): sect_cb")); if (ics->sect_cb[g][i] == 12) return 32; #if 0 printf("%d\n", ics->sect_cb[g][i]); #endif #ifndef DRM if (ics->sect_cb[g][i] == NOISE_HCB) ics->noise_used = 1; #else /* PNS not allowed in DRM */ if (ics->sect_cb[g][i] == NOISE_HCB) return 29; #endif if (ics->sect_cb[g][i] == INTENSITY_HCB2 || ics->sect_cb[g][i] == INTENSITY_HCB) ics->is_used = 1; #ifdef ERROR_RESILIENCE if (hDecoder->aacSectionDataResilienceFlag) { if ((ics->sect_cb[g][i] == 11) || ((ics->sect_cb[g][i] >= 16) && (ics->sect_cb[g][i] <= 32))) { vcb11 = 1; } } if (vcb11) { sect_len_incr = 1; } else { #endif sect_len_incr = (uint8_t)faad_getbits(ld, sect_bits DEBUGVAR(1,72,"section_data(): sect_len_incr")); #ifdef ERROR_RESILIENCE } #endif while ((sect_len_incr == sect_esc_val) /* && (k+sect_len < ics->max_sfb)*/) { sect_len += sect_len_incr; sect_len_incr = (uint8_t)faad_getbits(ld, sect_bits DEBUGVAR(1,72,"section_data(): sect_len_incr")); } sect_len += sect_len_incr; ics->sect_start[g][i] = k; ics->sect_end[g][i] = k + sect_len; #if 0 printf("%d\n", ics->sect_start[g][i]); #endif #if 0 printf("%d\n", ics->sect_end[g][i]); #endif if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { if (k + sect_len > 8*15) return 15; if (i >= 8*15) return 15; } else { if (k + sect_len > MAX_SFB) return 15; if (i >= MAX_SFB) return 15; } for (sfb = k; sfb < k + sect_len; sfb++) { ics->sfb_cb[g][sfb] = ics->sect_cb[g][i]; #if 0 printf("%d\n", ics->sfb_cb[g][sfb]); #endif } #if 0 printf(" %6d %6d %6d\n", i, ics->sect_end[g][i], ics->sect_cb[g][i]); #endif k += sect_len; i++; } ics->num_sec[g] = i; /* the sum of all sect_len_incr elements for a given window * group shall equal max_sfb */ if (k != ics->max_sfb) { return 32; } #if 0 printf("%d\n", ics->num_sec[g]); #endif } #if 0 printf("\n"); #endif return 0; } /* * decode_scale_factors() * decodes the scalefactors from the bitstream */ /* * All scalefactors (and also the stereo positions and pns energies) are * transmitted using Huffman coded DPCM relative to the previous active * scalefactor (respectively previous stereo position or previous pns energy, * see subclause 4.6.2 and 4.6.3). The first active scalefactor is * differentially coded relative to the global gain. 
 */
static uint8_t decode_scale_factors(ic_stream *ics, bitfile *ld)
{
    uint8_t g, sfb;
    int16_t t;
    int8_t noise_pcm_flag = 1;

    int16_t scale_factor = ics->global_gain;
    int16_t is_position = 0;
    int16_t noise_energy = ics->global_gain - 90;

    for (g = 0; g < ics->num_window_groups; g++)
    {
        for (sfb = 0; sfb < ics->max_sfb; sfb++)
        {
            switch (ics->sfb_cb[g][sfb])
            {
            case ZERO_HCB: /* zero book */
                ics->scale_factors[g][sfb] = 0;
//#define SF_PRINT
#ifdef SF_PRINT
                printf("%d\n", ics->scale_factors[g][sfb]);
#endif
                break;
            case INTENSITY_HCB: /* intensity books */
            case INTENSITY_HCB2:

                /* decode intensity position */
                t = huffman_scale_factor(ld);
                is_position += (t - 60);
                ics->scale_factors[g][sfb] = is_position;
#ifdef SF_PRINT
                printf("%d\n", ics->scale_factors[g][sfb]);
#endif
                break;
            case NOISE_HCB: /* noise books */

#ifndef DRM
                /* decode noise energy */
                if (noise_pcm_flag)
                {
                    noise_pcm_flag = 0;
                    t = (int16_t)faad_getbits(ld, 9
                        DEBUGVAR(1,73,"scale_factor_data(): first noise")) - 256;
                } else {
                    t = huffman_scale_factor(ld);
                    t -= 60;
                }
                noise_energy += t;
                ics->scale_factors[g][sfb] = noise_energy;
#ifdef SF_PRINT
                printf("%d\n", ics->scale_factors[g][sfb]);
#endif
#else
                /* PNS not allowed in DRM */
                return 29;
#endif
                break;
            default: /* spectral books */

                /* ics->scale_factors[g][sfb] must be between 0 and 255 */
                ics->scale_factors[g][sfb] = 0;

                /* decode scale factor */
                t = huffman_scale_factor(ld);
                scale_factor += (t - 60);
                if (scale_factor < 0 || scale_factor > 255)
                    return 4;
                ics->scale_factors[g][sfb] = scale_factor;
#ifdef SF_PRINT
                printf("%d\n", ics->scale_factors[g][sfb]);
#endif
                break;
            }
        }
    }

    return 0;
}

/* Table 4.4.26 */
static uint8_t scale_factor_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld)
{
    uint8_t ret = 0;
#ifdef PROFILE
    int64_t count = faad_get_ts();
#endif

#ifdef ERROR_RESILIENCE
    if (!hDecoder->aacScalefactorDataResilienceFlag)
    {
#endif
        ret = decode_scale_factors(ics, ld);
#ifdef ERROR_RESILIENCE
    } else {
        /* In ER AAC the parameters for RVLC are separated from the actual
           data that holds the scale_factors.
           Strangely enough, 2 parameters for HCR are put in between them.
*/ ret = rvlc_scale_factor_data(ics, ld); } #endif #ifdef PROFILE count = faad_get_ts() - count; hDecoder->scalefac_cycles += count; #endif return ret; } /* Table 4.4.27 */ static void tns_data(ic_stream *ics, tns_info *tns, bitfile *ld) { uint8_t w, filt, i, start_coef_bits, coef_bits; uint8_t n_filt_bits = 2; uint8_t length_bits = 6; uint8_t order_bits = 5; if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { n_filt_bits = 1; length_bits = 4; order_bits = 3; } for (w = 0; w < ics->num_windows; w++) { tns->n_filt[w] = (uint8_t)faad_getbits(ld, n_filt_bits DEBUGVAR(1,74,"tns_data(): n_filt")); #if 0 printf("%d\n", tns->n_filt[w]); #endif if (tns->n_filt[w]) { if ((tns->coef_res[w] = faad_get1bit(ld DEBUGVAR(1,75,"tns_data(): coef_res"))) & 1) { start_coef_bits = 4; } else { start_coef_bits = 3; } #if 0 printf("%d\n", tns->coef_res[w]); #endif } for (filt = 0; filt < tns->n_filt[w]; filt++) { tns->length[w][filt] = (uint8_t)faad_getbits(ld, length_bits DEBUGVAR(1,76,"tns_data(): length")); #if 0 printf("%d\n", tns->length[w][filt]); #endif tns->order[w][filt] = (uint8_t)faad_getbits(ld, order_bits DEBUGVAR(1,77,"tns_data(): order")); #if 0 printf("%d\n", tns->order[w][filt]); #endif if (tns->order[w][filt]) { tns->direction[w][filt] = faad_get1bit(ld DEBUGVAR(1,78,"tns_data(): direction")); #if 0 printf("%d\n", tns->direction[w][filt]); #endif tns->coef_compress[w][filt] = faad_get1bit(ld DEBUGVAR(1,79,"tns_data(): coef_compress")); #if 0 printf("%d\n", tns->coef_compress[w][filt]); #endif coef_bits = start_coef_bits - tns->coef_compress[w][filt]; for (i = 0; i < tns->order[w][filt]; i++) { tns->coef[w][filt][i] = (uint8_t)faad_getbits(ld, coef_bits DEBUGVAR(1,80,"tns_data(): coef")); #if 0 printf("%d\n", tns->coef[w][filt][i]); #endif } } } } } #ifdef LTP_DEC /* Table 4.4.28 */ static uint8_t ltp_data(NeAACDecStruct *hDecoder, ic_stream *ics, ltp_info *ltp, bitfile *ld) { uint8_t sfb, w; ltp->lag = 0; #ifdef LD_DEC if (hDecoder->object_type == LD) { ltp->lag_update = (uint8_t)faad_getbits(ld, 1 DEBUGVAR(1,142,"ltp_data(): lag_update")); if (ltp->lag_update) { ltp->lag = (uint16_t)faad_getbits(ld, 10 DEBUGVAR(1,81,"ltp_data(): lag")); } } else { #endif ltp->lag = (uint16_t)faad_getbits(ld, 11 DEBUGVAR(1,81,"ltp_data(): lag")); #ifdef LD_DEC } #endif /* Check length of lag */ if (ltp->lag > (hDecoder->frameLength << 1)) return 18; ltp->coef = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,82,"ltp_data(): coef")); if (ics->window_sequence == EIGHT_SHORT_SEQUENCE) { for (w = 0; w < ics->num_windows; w++) { if ((ltp->short_used[w] = faad_get1bit(ld DEBUGVAR(1,83,"ltp_data(): short_used"))) & 1) { ltp->short_lag_present[w] = faad_get1bit(ld DEBUGVAR(1,84,"ltp_data(): short_lag_present")); if (ltp->short_lag_present[w]) { ltp->short_lag[w] = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,85,"ltp_data(): short_lag")); } } } } else { ltp->last_band = (ics->max_sfb < MAX_LTP_SFB ? 
            ics->max_sfb : MAX_LTP_SFB);
        for (sfb = 0; sfb < ltp->last_band; sfb++)
        {
            ltp->long_used[sfb] = faad_get1bit(ld
                DEBUGVAR(1,86,"ltp_data(): long_used"));
        }
    }

    return 0;
}
#endif

/* Table 4.4.29 */
static uint8_t spectral_data(NeAACDecStruct *hDecoder, ic_stream *ics, bitfile *ld,
                             int16_t *spectral_data)
{
    int8_t i;
    uint8_t g;
    uint16_t inc, k, p = 0;
    uint8_t groups = 0;
    uint8_t sect_cb;
    uint8_t result;
    uint16_t nshort = hDecoder->frameLength/8;
#ifdef PROFILE
    int64_t count = faad_get_ts();
#endif

    for(g = 0; g < ics->num_window_groups; g++)
    {
        p = groups*nshort;

        for (i = 0; i < ics->num_sec[g]; i++)
        {
            sect_cb = ics->sect_cb[g][i];

            inc = (sect_cb >= FIRST_PAIR_HCB) ? 2 : 4;

            switch (sect_cb)
            {
            case ZERO_HCB:
            case NOISE_HCB:
            case INTENSITY_HCB:
            case INTENSITY_HCB2:
//#define SD_PRINT
#ifdef SD_PRINT
                {
                    int j;
                    for (j = ics->sect_sfb_offset[g][ics->sect_start[g][i]]; j < ics->sect_sfb_offset[g][ics->sect_end[g][i]]; j++)
                    {
                        printf("%d\n", 0);
                    }
                }
#endif
//#define SFBO_PRINT
#ifdef SFBO_PRINT
                printf("%d\n", ics->sect_sfb_offset[g][ics->sect_start[g][i]]);
#endif
                p += (ics->sect_sfb_offset[g][ics->sect_end[g][i]] -
                    ics->sect_sfb_offset[g][ics->sect_start[g][i]]);
                break;
            default:
#ifdef SFBO_PRINT
                printf("%d\n", ics->sect_sfb_offset[g][ics->sect_start[g][i]]);
#endif
                for (k = ics->sect_sfb_offset[g][ics->sect_start[g][i]];
                     k < ics->sect_sfb_offset[g][ics->sect_end[g][i]]; k += inc)
                {
                    if ((result = huffman_spectral_data(sect_cb, ld, &spectral_data[p])) > 0)
                        return result;
#ifdef SD_PRINT
                    {
                        int j;
                        for (j = p; j < p+inc; j++)
                        {
                            printf("%d\n", spectral_data[j]);
                        }
                    }
#endif
                    p += inc;
                }
                break;
            }
        }
        groups += ics->window_group_length[g];
    }

#ifdef PROFILE
    count = faad_get_ts() - count;
    hDecoder->spectral_cycles += count;
#endif

    return 0;
}

/* Table 4.4.30 */
static uint16_t extension_payload(bitfile *ld, drc_info *drc, uint16_t count)
{
    uint16_t i, n, dataElementLength;
    uint8_t dataElementLengthPart;
    uint8_t align = 4, data_element_version, loopCounter;

    uint8_t extension_type = (uint8_t)faad_getbits(ld, 4
        DEBUGVAR(1,87,"extension_payload(): extension_type"));

    switch (extension_type)
    {
    case EXT_DYNAMIC_RANGE:
        drc->present = 1;
        n = dynamic_range_info(ld, drc);
        return n;
    case EXT_FILL_DATA:
        /* fill_nibble = */ faad_getbits(ld, 4
            DEBUGVAR(1,136,"extension_payload(): fill_nibble")); /* must be '0000' */
        for (i = 0; i < count-1; i++)
        {
            /* fill_byte[i] = */ faad_getbits(ld, 8
                DEBUGVAR(1,88,"extension_payload(): fill_byte")); /* must be '10100101' */
        }
        return count;
    case EXT_DATA_ELEMENT:
        data_element_version = (uint8_t)faad_getbits(ld, 4
            DEBUGVAR(1,400,"extension_payload(): data_element_version"));
        switch (data_element_version)
        {
        case ANC_DATA:
            loopCounter = 0;
            dataElementLength = 0;
            do {
                dataElementLengthPart = (uint8_t)faad_getbits(ld, 8
                    DEBUGVAR(1,401,"extension_payload(): dataElementLengthPart"));
                dataElementLength += dataElementLengthPart;
                loopCounter++;
            } while (dataElementLengthPart == 255);

            for (i = 0; i < dataElementLength; i++)
            {
                /* data_element_byte[i] = */ faad_getbits(ld, 8
                    DEBUGVAR(1,402,"extension_payload(): data_element_byte"));
            }
            /* 1 byte for the type/version nibbles, loopCounter length bytes,
               dataElementLength payload bytes */
            return (dataElementLength+loopCounter+1);
        default:
            align = 0;
        }
        /* fall through: unknown data element, treat the remainder as fill */
    case EXT_FIL:
    default:
        faad_getbits(ld, align
            DEBUGVAR(1,88,"extension_payload(): fill_nibble"));
        for (i = 0; i < count-1; i++)
        {
            /* other_bits[i] = */ faad_getbits(ld, 8
                DEBUGVAR(1,89,"extension_payload(): fill_bit"));
        }
        return count;
    }
}

/* Table 4.4.31 */
static uint8_t dynamic_range_info(bitfile *ld, drc_info *drc)
{
    uint8_t i, n = 1;
    uint8_t band_incr;

    drc->num_bands = 1;

    if (faad_get1bit(ld
        DEBUGVAR(1,90,"dynamic_range_info(): has instance_tag")) & 1)
    {
        drc->pce_instance_tag = (uint8_t)faad_getbits(ld, 4
            DEBUGVAR(1,91,"dynamic_range_info(): pce_instance_tag"));
        /* drc->drc_tag_reserved_bits = */ faad_getbits(ld, 4
            DEBUGVAR(1,92,"dynamic_range_info(): drc_tag_reserved_bits"));
        n++;
    }

    drc->excluded_chns_present = faad_get1bit(ld
        DEBUGVAR(1,93,"dynamic_range_info(): excluded_chns_present"));
    if (drc->excluded_chns_present == 1)
    {
        n += excluded_channels(ld, drc);
    }

    if (faad_get1bit(ld
        DEBUGVAR(1,94,"dynamic_range_info(): has bands data")) & 1)
    {
        band_incr = (uint8_t)faad_getbits(ld, 4
            DEBUGVAR(1,95,"dynamic_range_info(): band_incr"));
        /* drc->drc_bands_reserved_bits = */ faad_getbits(ld, 4
            DEBUGVAR(1,96,"dynamic_range_info(): drc_bands_reserved_bits"));
        n++;
        drc->num_bands += band_incr;

        for (i = 0; i < drc->num_bands; i++)
        {
            drc->band_top[i] = (uint8_t)faad_getbits(ld, 8
                DEBUGVAR(1,97,"dynamic_range_info(): band_top"));
            n++;
        }
    }

    if (faad_get1bit(ld
        DEBUGVAR(1,98,"dynamic_range_info(): has prog_ref_level")) & 1)
    {
        drc->prog_ref_level = (uint8_t)faad_getbits(ld, 7
            DEBUGVAR(1,99,"dynamic_range_info(): prog_ref_level"));
        /* drc->prog_ref_level_reserved_bits = */ faad_get1bit(ld
            DEBUGVAR(1,100,"dynamic_range_info(): prog_ref_level_reserved_bits"));
        n++;
    }

    for (i = 0; i < drc->num_bands; i++)
    {
        drc->dyn_rng_sgn[i] = faad_get1bit(ld
            DEBUGVAR(1,101,"dynamic_range_info(): dyn_rng_sgn"));
        drc->dyn_rng_ctl[i] = (uint8_t)faad_getbits(ld, 7
            DEBUGVAR(1,102,"dynamic_range_info(): dyn_rng_ctl"));
        n++;
    }

    return n;
}

/* Table 4.4.32 */
static uint8_t excluded_channels(bitfile *ld, drc_info *drc)
{
    uint8_t i, n = 0;
    uint8_t num_excl_chan = 7;

    for (i = 0; i < 7; i++)
    {
        drc->exclude_mask[i] = faad_get1bit(ld
            DEBUGVAR(1,103,"excluded_channels(): exclude_mask"));
    }
    n++;

    while ((drc->additional_excluded_chns[n-1] = faad_get1bit(ld
        DEBUGVAR(1,104,"excluded_channels(): additional_excluded_chns"))) == 1)
    {
        /* exclude_mask[] and additional_excluded_chns[] are fixed-size
           (MAX_CHANNELS) arrays in drc_info; a crafted stream with a long
           run of 1 bits must not be allowed to index past either of them */
        if (((uint16_t)num_excl_chan + 7 > MAX_CHANNELS) ||
            (n >= MAX_CHANNELS))
            return n;

        for (i = num_excl_chan; i < num_excl_chan+7; i++)
        {
            drc->exclude_mask[i] = faad_get1bit(ld
                DEBUGVAR(1,105,"excluded_channels(): exclude_mask"));
        }
        n++;
        num_excl_chan += 7;
    }

    return n;
}

/* Annex A: Audio Interchange Formats */

/* Table 1.A.2 */
void get_adif_header(adif_header *adif, bitfile *ld)
{
    uint8_t i;

    /* adif_id[0] = */ faad_getbits(ld, 8
        DEBUGVAR(1,106,"get_adif_header(): adif_id[0]"));
    /* adif_id[1] = */ faad_getbits(ld, 8
        DEBUGVAR(1,107,"get_adif_header(): adif_id[1]"));
    /* adif_id[2] = */ faad_getbits(ld, 8
        DEBUGVAR(1,108,"get_adif_header(): adif_id[2]"));
    /* adif_id[3] = */ faad_getbits(ld, 8
        DEBUGVAR(1,109,"get_adif_header(): adif_id[3]"));
    adif->copyright_id_present = faad_get1bit(ld
        DEBUGVAR(1,110,"get_adif_header(): copyright_id_present"));
    if(adif->copyright_id_present)
    {
        for (i = 0; i < 72/8; i++)
        {
            adif->copyright_id[i] = (int8_t)faad_getbits(ld, 8
                DEBUGVAR(1,111,"get_adif_header(): copyright_id"));
        }
        adif->copyright_id[i] = 0;
    }
    adif->original_copy = faad_get1bit(ld
        DEBUGVAR(1,112,"get_adif_header(): original_copy"));
    adif->home = faad_get1bit(ld
        DEBUGVAR(1,113,"get_adif_header(): home"));
    adif->bitstream_type = faad_get1bit(ld
        DEBUGVAR(1,114,"get_adif_header(): bitstream_type"));
    adif->bitrate = faad_getbits(ld, 23
        DEBUGVAR(1,115,"get_adif_header(): bitrate"));
    adif->num_program_config_elements = (uint8_t)faad_getbits(ld, 4
        DEBUGVAR(1,116,"get_adif_header(): num_program_config_elements"));

    for (i = 0; i < adif->num_program_config_elements + 1; i++)
    {
        if(adif->bitstream_type == 0)
        {
            adif->adif_buffer_fullness = faad_getbits(ld, 20
                DEBUGVAR(1,117,"get_adif_header():
adif_buffer_fullness")); } else { adif->adif_buffer_fullness = 0; } program_config_element(&adif->pce[i], ld); } } /* Table 1.A.5 */ uint8_t adts_frame(adts_header *adts, bitfile *ld) { /* faad_byte_align(ld); */ if (adts_fixed_header(adts, ld)) return 5; adts_variable_header(adts, ld); adts_error_check(adts, ld); return 0; } /* Table 1.A.6 */ static uint8_t adts_fixed_header(adts_header *adts, bitfile *ld) { uint16_t i; uint8_t sync_err = 1; /* try to recover from sync errors */ for (i = 0; i < 768; i++) { adts->syncword = (uint16_t)faad_showbits(ld, 12); if (adts->syncword != 0xFFF) { faad_getbits(ld, 8 DEBUGVAR(0,0,"")); } else { sync_err = 0; faad_getbits(ld, 12 DEBUGVAR(1,118,"adts_fixed_header(): syncword")); break; } } if (sync_err) return 5; adts->id = faad_get1bit(ld DEBUGVAR(1,119,"adts_fixed_header(): id")); adts->layer = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,120,"adts_fixed_header(): layer")); adts->protection_absent = faad_get1bit(ld DEBUGVAR(1,121,"adts_fixed_header(): protection_absent")); adts->profile = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,122,"adts_fixed_header(): profile")); adts->sf_index = (uint8_t)faad_getbits(ld, 4 DEBUGVAR(1,123,"adts_fixed_header(): sf_index")); adts->private_bit = faad_get1bit(ld DEBUGVAR(1,124,"adts_fixed_header(): private_bit")); adts->channel_configuration = (uint8_t)faad_getbits(ld, 3 DEBUGVAR(1,125,"adts_fixed_header(): channel_configuration")); adts->original = faad_get1bit(ld DEBUGVAR(1,126,"adts_fixed_header(): original")); adts->home = faad_get1bit(ld DEBUGVAR(1,127,"adts_fixed_header(): home")); if (adts->old_format == 1) { /* Removed in corrigendum 14496-3:2002 */ if (adts->id == 0) { adts->emphasis = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,128,"adts_fixed_header(): emphasis")); } } return 0; } /* Table 1.A.7 */ static void adts_variable_header(adts_header *adts, bitfile *ld) { adts->copyright_identification_bit = faad_get1bit(ld DEBUGVAR(1,129,"adts_variable_header(): copyright_identification_bit")); adts->copyright_identification_start = faad_get1bit(ld DEBUGVAR(1,130,"adts_variable_header(): copyright_identification_start")); adts->aac_frame_length = (uint16_t)faad_getbits(ld, 13 DEBUGVAR(1,131,"adts_variable_header(): aac_frame_length")); adts->adts_buffer_fullness = (uint16_t)faad_getbits(ld, 11 DEBUGVAR(1,132,"adts_variable_header(): adts_buffer_fullness")); adts->no_raw_data_blocks_in_frame = (uint8_t)faad_getbits(ld, 2 DEBUGVAR(1,133,"adts_variable_header(): no_raw_data_blocks_in_frame")); } /* Table 1.A.8 */ static void adts_error_check(adts_header *adts, bitfile *ld) { if (adts->protection_absent == 0) { adts->crc_check = (uint16_t)faad_getbits(ld, 16 DEBUGVAR(1,134,"adts_error_check(): crc_check")); } } /* LATM parsing functions */ static uint32_t latm_get_value(bitfile *ld) { uint32_t l, value; uint8_t bytesForValue; bytesForValue = (uint8_t)faad_getbits(ld, 2); value = 0; for(l=0; l<bytesForValue; l++) value = (value << 8) | (uint8_t)faad_getbits(ld, 8); return value; } static uint32_t latmParsePayload(latm_header *latm, bitfile *ld) { //assuming there's only one program with a single layer and 1 subFrame, //allStreamsSametimeframing is set, uint32_t framelen; uint8_t tmp; //this should be the payload length field for the current configuration framelen = 0; if(latm->framelen_type==0) { do { tmp = (uint8_t)faad_getbits(ld, 8); framelen += tmp; } while(tmp==0xff); } else if(latm->framelen_type==1) framelen=latm->frameLength; return framelen; } static uint32_t latmAudioMuxElement(latm_header *latm, bitfile *ld) { uint32_t 
        ascLen, asc_bits=0;
    uint32_t x1, y1, m, n, i;
    program_config pce;
    mp4AudioSpecificConfig mp4ASC;

    latm->useSameStreamMux = (uint8_t)faad_getbits(ld, 1);
    if(!latm->useSameStreamMux)
    {
        //parseSameStreamMuxConfig
        latm->version = (uint8_t) faad_getbits(ld, 1);
        if(latm->version)
            latm->versionA = (uint8_t) faad_getbits(ld, 1);
        if(latm->versionA)
        {
            //the payload format for versionA is not known
            fprintf(stderr, "versionA not supported\n");
            return 0;
        }
        if(latm->version) //read taraBufferFullness
            latm_get_value(ld);
        latm->allStreamsSameTimeFraming = (uint8_t)faad_getbits(ld, 1);
        latm->numSubFrames = (uint8_t)faad_getbits(ld, 6) + 1;
        latm->numPrograms = (uint8_t)faad_getbits(ld, 4) + 1;
        latm->numLayers = faad_getbits(ld, 3) + 1;
        if(latm->numPrograms>1 || !latm->allStreamsSameTimeFraming ||
            latm->numSubFrames>1 || latm->numLayers>1)
        {
            fprintf(stderr, "\r\nUnsupported LATM configuration: %d programs/ %d subframes, %d layers, allstreams: %d\n",
                latm->numPrograms, latm->numSubFrames, latm->numLayers, latm->allStreamsSameTimeFraming);
            return 0;
        }
        ascLen = 0;
        if(latm->version)
            ascLen = latm_get_value(ld);

        x1 = faad_get_processed_bits(ld);
        if(AudioSpecificConfigFromBitfile(ld, &mp4ASC, &pce, 0, 1) < 0)
            return 0;

        //horrid hack to unread the ASC bits and store them in latm->ASC
        //the correct code would rely on an ideal faad_ungetbits()
        y1 = faad_get_processed_bits(ld);
        if((y1-x1) <= MAX_ASC_BYTES*8)
        {
            faad_rewindbits(ld);
            m = x1;
            while(m>0)
            {
                n = min(m, 32);
                faad_getbits(ld, n);
                m -= n;
            }

            i = 0;
            m = latm->ASCbits = y1 - x1;
            while(m > 0)
            {
                n = min(m, 8);
                latm->ASC[i++] = (uint8_t) faad_getbits(ld, n);
                m -= n;
            }
        }

        asc_bits = y1-x1;

        if(ascLen>asc_bits)
            faad_getbits(ld, ascLen-asc_bits);

        latm->framelen_type = (uint8_t) faad_getbits(ld, 3);
        if(latm->framelen_type == 0)
        {
            latm->frameLength = 0;
            faad_getbits(ld, 8); //buffer fullness for frame_len_type==0, useless
        }
        else if(latm->framelen_type == 1)
        {
            latm->frameLength = faad_getbits(ld, 9);
            if(latm->frameLength==0)
            {
                fprintf(stderr, "Invalid frameLength: 0\r\n");
                return 0;
            }
            latm->frameLength = (latm->frameLength+20)*8;
        }
        else
        {
            //CELP or HVXC payload formats are not handled here: discard
            fprintf(stderr, "Unsupported CELP/HVXC framelen_type: %d\n", latm->framelen_type);
            return 0;
        }

        latm->otherDataLenBits = 0;
        if(faad_getbits(ld, 1))
        {   //other data present
            int esc, tmp;
            if(latm->version)
                latm->otherDataLenBits = latm_get_value(ld);
            else do
            {
                esc = faad_getbits(ld, 1);
                tmp = faad_getbits(ld, 8);
                latm->otherDataLenBits = (latm->otherDataLenBits << 8) + tmp;
            } while(esc);
        }
        if(faad_getbits(ld, 1)) //crc
            faad_getbits(ld, 8);
        latm->inited = 1;
    }

    //read payload
    if(latm->inited)
        return latmParsePayload(latm, ld);
    else
        return 0;
}

uint32_t faad_latm_frame(latm_header *latm, bitfile *ld)
{
    uint16_t len;
    uint32_t initpos, endpos, firstpos, ret;

    firstpos = faad_get_processed_bits(ld);
    while (ld->bytes_left)
    {
        faad_byte_align(ld);
        if(faad_showbits(ld, 11) != 0x2B7)
        {
            faad_getbits(ld, 8);
            continue;
        }
        faad_getbits(ld, 11);
        len = faad_getbits(ld, 13);
        if(!len)
            continue;
        initpos = faad_get_processed_bits(ld);
        ret = latmAudioMuxElement(latm, ld);
        endpos = faad_get_processed_bits(ld);
        if(ret>0)
            return (len*8)-(endpos-initpos);
        //faad_getbits(ld, initpos-endpos); //would rewind to initpos, but getbits(-N) is not valid
    }
    return -1U;
}
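
#if 0
/* Illustrative usage sketch only (not part of the library build): shows how
   the LATM reader above is typically driven.  The buffer arguments here are
   hypothetical; latm_header, bitfile, faad_initbits(), faad_endbits() and
   faad_latm_frame() are the types and helpers defined in and around this
   file. */
static void example_latm_scan(uint8_t *loas_data, uint32_t loas_len)
{
    latm_header latm = {0};
    bitfile ld;
    uint32_t payload_bits;

    faad_initbits(&ld, loas_data, loas_len);

    /* faad_latm_frame() scans for the 0x2B7 LOAS syncword, parses the
       StreamMuxConfig (caching the AudioSpecificConfig in latm.ASC) and
       leaves the bitfile positioned at the start of the raw AAC payload.
       It returns the number of payload bits left in the located frame,
       or -1U when no valid LATM frame was found. */
    payload_bits = faad_latm_frame(&latm, &ld);
    if ((payload_bits != -1U) && latm.inited)
    {
        /* latm.ASC/latm.ASCbits now hold the decoder configuration and the
           bitfile can be handed to the raw data block parser. */
    }

    faad_endbits(&ld);
}
#endif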
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%              EEEEE  N   N  H   H   AAA   N   N   CCCC  EEEEE                %
%              E      NN  N  H   H  A   A  NN  N  C      E                    %
%              EEE    N N N  HHHHH  AAAAA  N N N  C      EEE                  %
%              E      N  NN  H   H  A   A  N  NN  C      E                    %
%              EEEEE  N   N  H   H  A   A  N   N   CCCC  EEEEE                %
%                                                                             %
%                                                                             %
%                    MagickCore Image Enhancement Methods                     %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://www.imagemagick.org/script/license.php                           %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o G a m m a I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoGammaImage() extracts the 'mean' from the image and adjusts the image
%  so as to set its gamma appropriately.
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to auto-gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_mask=SetImageChannelMask(image,(ChannelType) (1 << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to auto-level.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   B r i g h t n e s s   C o n t r a s t   I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply them to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContrastImageTag  "BrightnessContrast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image, by using it as an
%  index to lookup a replacement color value in a Color Look-Up Table (CLUT)
%  in the form of an image.  The values are extracted along a diagonal of the
%  CLUT image so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel, it is assumed that that image is a simple
%  gray-scale image that will affect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ?
    0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    (void) InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o l o r D e c i s i o n L i s t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
% % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { 
color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. */ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. 
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast; otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastImage) #endif proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
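%
%  A minimal usage sketch (illustrative): black_point and white_point are
%  pixel counts, and white_point is expressed as a rank from the dark end,
%  so clipping 2% of the pixels at each extreme looks like:
%
%      pixels=(double) image->columns*image->rows;
%      (void) ContrastStretchImage(image,0.02*pixels,pixels-0.02*pixels,
%        exception);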
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.  Black and white points are given as
%      pixel counts in the range of 0 to number-of-pixels (e.g. 1%, 10x90%,
%      etc.).
%
%    o white_point: the white point.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(GetPixelChannels(image),
    sizeof(*black));
  white=(double *) AcquireQuantumMemory(GetPixelChannels(image),
    sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
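    Black is the first map index whose cumulative count exceeds black_point;
    white is the highest index whose count, accumulated from the top of the
    histogram, exceeds columns*rows-white_point.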
*/ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; black[i]=0.0; white[i]=MaxRange(QuantumRange); intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > black_point) break; } black[i]=(double) j; intensity=0.0; for (j=(ssize_t) MaxMap; j != 0; j--) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white[i]=(double) j; } histogram=(double *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*stretch_map)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma=PerceptibleReciprocal(white[i]-black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image)*j+i]=0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum( (double) (MaxMap*gamma*(j-black[i]))); } } if (image->storage_class == PseudoClass) { register ssize_t j; /* Stretch-contrast colormap. */ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,RedPixelChannel); image->colormap[j].red=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,GreenPixelChannel); image->colormap[j].green=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,BluePixelChannel); image->colormap[j].blue=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,AlphaPixelChannel); image->colormap[j].alpha=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i]; } } } /* Stretch-contrast image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImage) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(double *) RelinquishMagickMemory(stretch_map); white=(double *) RelinquishMagickMemory(white); black=(double *) RelinquishMagickMemory(black); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
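%
%  A minimal usage sketch (the result is a new image; names illustrative):
%
%      noise_free=EnhanceImage(image,exception);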
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse) { enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. 
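    Each output pixel is a weighted mean of its 5x5 neighborhood; a neighbor
    contributes (weights 5..80, peaking at the center) only when the color
    distance computed by EnhancePixel() stays below the 0.069 similarity
    threshold.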
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2); GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; register const Quantum *magick_restrict r; if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(enhance_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); continue; } GetPixelInfo(image,&aggregate); total_weight=0.0; GetPixelInfoPixel(image,p+center,&pixel); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*GetPixelChannels(image)*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*GetPixelChannels(image)*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); pixel.red=((aggregate.red+total_weight/2.0)/total_weight); pixel.green=((aggregate.green+total_weight/2.0)/total_weight); pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight); pixel.black=((aggregate.black+total_weight/2.0)/total_weight); pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight); SetPixelViaPixelInfo(image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EnhanceImage) #endif proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
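%
%  The equalization map is the cumulative histogram of each channel, rescaled
%  so that the counts between its first (black) and last (white) values span
%  the full quantum range.  A minimal usage sketch:
%
%      (void) EqualizeImage(image,exception);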
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  (void) ResetMagickMemory(black,0,sizeof(black));
  (void) ResetMagickMemory(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. */ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImage) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. 
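%
%  For example, a sketch that lightens the mid-tones (values above 1.0
%  lighten, values below darken):
%
%      (void) GammaImage(image,1.8,exception);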
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const double gamma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
#else
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].red,1.0/gamma);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].green,1.0/gamma);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].blue,1.0/gamma);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].alpha,1.0/gamma);
#endif
    }
  /*
    Gamma-correct image.
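    Each sample is mapped through gamma_map[i]=ScaleMapToQuantum(MaxMap*
    pow(i/MaxMap,1.0/gamma)) at fixed quantum depth, or computed directly
    with gamma_pow() under HDRI.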
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; #if !defined(MAGICKCORE_HDRI_SUPPORT) q[j]=gamma_map[ScaleQuantumToMap(q[j])]; #else q[j]=QuantumRange*gamma_pow(QuantumScale*q[j],1.0/gamma); #endif } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GammaImage) #endif proceed=SetImageProgress(image,GammaCorrectImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the image to grayscale. % % The format of the GrayscaleImage method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method ,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the pixel intensity method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method,ExceptionInfo *exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; return(SetImageColorspace(image,GRAYColorspace,exception)); } #endif /* Grayscale image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } red=(MagickRealType) GetPixelRed(image,q); green=(MagickRealType) GetPixelGreen(image,q); blue=(MagickRealType) GetPixelBlue(image,q); intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(image,ClampToQuantum(intensity),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GrayscaleImage) #endif proceed=SetImageProgress(image,GrayscaleImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; return(SetImageColorspace(image,GRAYColorspace,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. 
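%
%  A minimal usage sketch (names illustrative; an identity CLUT of order 8
%  can be read from the filename "hald:8"):
%
%      hald=ReadImage(hald_info,exception);
%      (void) HaldClutImage(image,hald,exception);
%      hald=DestroyImage(hald);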
% % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image,ExceptionInfo *exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetPixelInfo(hald_image,&zero); hald_view=AcquireVirtualCacheView(hald_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); pixel1=zero; (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); pixel2=zero; (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); pixel3=zero; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, point.y,&pixel3); offset+=cube_size; (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); pixel4=zero; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, point.y,&pixel4); pixel=zero; CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha, point.z,&pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,ClampToQuantum(pixel.red),q); 
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: the gamma correction to apply to the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    level_pixel,
    scale;

  if (fabs(white_point-black_point) < MagickEpsilon)
    return(pixel);
  scale=1.0/(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    1.0/gamma);
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
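        LevelPixel() maps v to QuantumRange*((v-black_point)/(white_point-
        black_point))^(1.0/gamma); results outside the quantum range are
        clamped when stored.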
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImage)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImage() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma
%  is applied before the values are mapped.
%
%  LevelizeImage() can be called by using the +level command-line option, or
%  by using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used to de-contrast a greyscale image to the exact levels
%  specified.  Alternatively, by using specific levels for each channel of an
%  image, you can convert a gray-scale image to any linear color gradient
%  according to those levels.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
#define LevelizeValue(x)  ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Levelize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(
          image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Levelize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true, the image values will be modified in
%  the reverse direction.
%  That is, any existing "black" and "white" colors in the image will become
%  the color values given, with all other values compressed appropriately.
%  This effectively maps a greyscale gradient into the given color gradient.
%
%  The format of the LevelImageColors method is:
%
%      MagickBooleanType LevelImageColors(Image *image,
%        const PixelInfo *black_color,const PixelInfo *white_color,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if true, map the colors to the given levels (levelize) rather
%      than from them (level).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  else
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelizeImage(image,black_color->green,white_color->green,
            1.0,exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView *image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(double *) RelinquishMagickMemory(histogram); status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black), (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). 
%  If the image colorspace is HSL, the modulation is lightness, saturation,
%  and hue.  For HWB, use blackness, whiteness, and hue.  And for HCL, use
%  chroma, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
*/ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. 
*/ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor !=
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. */ if( grayscale != MagickFalse ) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if ((GetPixelWriteMask(image,q) == 0) || IsPixelGray(image,q) != MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* Negate image.
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 0.15 percent of all pixels to black and the brightest % 0.05 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const double contrast, % const double midpoint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange.
% % o exception: return any errors or warnings in this structure. % */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in-gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { register ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SigmoidalContrastImage) #endif proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
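/*
 * Editor's illustration (not part of the ImageMagick source above): a
 * minimal, self-contained sketch of the scaled sigmoidal transfer function
 * that SigmoidalContrastImage() applies to each channel, written from the
 * tanh/atanh identities documented in the comments preceding it. The
 * contrast a=4.5 and midpoint b=0.5 are arbitrary demonstration values,
 * not defaults taken from the library; build with a C99 compiler and -lm.
 */
#include <math.h>
#include <stdio.h>

/* Sigmoidal(a,b,x) from the source: tanh((0.5*a)*(x-b)). */
static double Sig(double a, double b, double x)
{ return tanh((0.5*a)*(x-b)); }

/* ScaledSigmoidal: rescaled so [0,1] maps onto [0,1], steepest at x=b. */
static double ScaledSig(double a, double b, double x)
{ return (Sig(a,b,x)-Sig(a,b,0.0))/(Sig(a,b,1.0)-Sig(a,b,0.0)); }

/* Right inverse, mirroring the atanh branch of InverseScaledSigmoidal(). */
static double InvScaledSig(double a, double b, double x)
{
  double s0 = Sig(a,b,0.0), s1 = Sig(a,b,1.0);
  return b+(2.0/a)*atanh((s1-s0)*x+s0);
}

int main(void)
{
  double a = 4.5, b = 0.5, x;
  /* Round-tripping through the inverse recovers x, as claimed above. */
  for (x = 0.0; x <= 1.0; x += 0.25)
    printf("x=%.2f sigmoidal=%.4f inverse(round-trip)=%.4f\n",
      x, ScaledSig(a,b,x), InvScaledSig(a,b,ScaledSig(a,b,x)));
  return 0;
}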
./CrossVul/dataset_final_sorted/CWE-119/c/good_2637_0
crossvul-cpp_data_bad_345_2
/* * card-muscle.c: Support for MuscleCard Applet from musclecard.com * * Copyright (C) 2006, Identity Alliance, Thomas Harning <support@identityalliance.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <string.h> #include "internal.h" #include "cardctl.h" #include "muscle.h" #include "muscle-filesystem.h" #include "types.h" #include "opensc.h" static struct sc_card_operations muscle_ops; static const struct sc_card_operations *iso_ops = NULL; static struct sc_card_driver muscle_drv = { "MuscleApplet", "muscle", &muscle_ops, NULL, 0, NULL }; static struct sc_atr_table muscle_atrs[] = { /* Tyfone JCOP 242R2 cards */ { "3b:6d:00:00:ff:54:79:66:6f:6e:65:20:32:34:32:52:32", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP242R2_NO_EXT_APDU, 0, NULL }, /* Aladdin eToken PRO USB 72K Java */ { "3b:d5:18:00:81:31:3a:7d:80:73:c8:21:10:30", NULL, NULL, SC_CARD_TYPE_MUSCLE_ETOKEN_72K, 0, NULL }, /* JCOP31 v2.4.1 contact interface */ { "3b:f8:13:00:00:81:31:fe:45:4a:43:4f:50:76:32:34:31:b7", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP241, 0, NULL }, /* JCOP31 v2.4.1 RF interface */ { "3b:88:80:01:4a:43:4f:50:76:32:34:31:5e", NULL, NULL, SC_CARD_TYPE_MUSCLE_JCOP241, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; #define MUSCLE_DATA(card) ( (muscle_private_t*)card->drv_data ) #define MUSCLE_FS(card) ( ((muscle_private_t*)card->drv_data)->fs ) typedef struct muscle_private { sc_security_env_t env; unsigned short verifiedPins; mscfs_t *fs; int rsa_key_ref; } muscle_private_t; static int muscle_finish(sc_card_t *card) { muscle_private_t *priv = MUSCLE_DATA(card); mscfs_free(priv->fs); free(priv); return 0; } static u8 muscleAppletId[] = { 0xA0, 0x00,0x00,0x00, 0x01, 0x01 }; static int muscle_match_card(sc_card_t *card) { sc_apdu_t apdu; u8 response[64]; int r; /* Since we send an APDU, the card's logout function may be called... * however it's not always properly nulled out... */ card->ops->logout = NULL; if (msc_select_applet(card, muscleAppletId, sizeof muscleAppletId) == 1) { /* Muscle applet is present, check the protocol version to be sure */ sc_format_apdu(card, &apdu, SC_APDU_CASE_2, 0x3C, 0x00, 0x00); apdu.cla = 0xB0; apdu.le = 64; apdu.resplen = 64; apdu.resp = response; r = sc_transmit_apdu(card, &apdu); if (r == SC_SUCCESS && response[0] == 0x01) { card->type = SC_CARD_TYPE_MUSCLE_V1; } else { card->type = SC_CARD_TYPE_MUSCLE_GENERIC; } return 1; } return 0; } /* Since Musclecard has a different ACL system then PKCS15 * objects need to have their READ/UPDATE/DELETE permissions mapped for files * and directory ACLS need to be set * For keys.. 
they have different ACLS, but are accessed in different locations, so it shouldn't be an issue here */ static unsigned short muscle_parse_singleAcl(const sc_acl_entry_t* acl) { unsigned short acl_entry = 0; while(acl) { int key = acl->key_ref; int method = acl->method; switch(method) { case SC_AC_NEVER: return 0xFFFF; /* Ignore... other items overwrite these */ case SC_AC_NONE: case SC_AC_UNKNOWN: break; case SC_AC_CHV: acl_entry |= (1 << key); /* Assuming key 0 == SO */ break; case SC_AC_AUT: case SC_AC_TERM: case SC_AC_PRO: default: /* Ignored */ break; } acl = acl->next; } return acl_entry; } static void muscle_parse_acls(const sc_file_t* file, unsigned short* read_perm, unsigned short* write_perm, unsigned short* delete_perm) { assert(read_perm && write_perm && delete_perm); *read_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_READ)); *write_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_UPDATE)); *delete_perm = muscle_parse_singleAcl(sc_file_get_acl_entry(file, SC_AC_OP_DELETE)); } static int muscle_create_directory(sc_card_t *card, sc_file_t *file) { mscfs_t *fs = MUSCLE_FS(card); msc_id objectId; u8* oid = objectId.id; unsigned id = file->id; unsigned short read_perm = 0, write_perm = 0, delete_perm = 0; int objectSize; int r; if(id == 0) /* No null name files */ return SC_ERROR_INVALID_ARGUMENTS; /* No nesting directories */ if(fs->currentPath[0] != 0x3F || fs->currentPath[1] != 0x00) return SC_ERROR_NOT_SUPPORTED; oid[0] = ((id & 0xFF00) >> 8) & 0xFF; oid[1] = id & 0xFF; oid[2] = oid[3] = 0; objectSize = file->size; muscle_parse_acls(file, &read_perm, &write_perm, &delete_perm); r = msc_create_object(card, objectId, objectSize, read_perm, write_perm, delete_perm); mscfs_clear_cache(fs); if(r >= 0) return 0; return r; } static int muscle_create_file(sc_card_t *card, sc_file_t *file) { mscfs_t *fs = MUSCLE_FS(card); int objectSize = file->size; unsigned short read_perm = 0, write_perm = 0, delete_perm = 0; msc_id objectId; int r; if(file->type == SC_FILE_TYPE_DF) return muscle_create_directory(card, file); if(file->type != SC_FILE_TYPE_WORKING_EF) return SC_ERROR_NOT_SUPPORTED; if(file->id == 0) /* No null name files */ return SC_ERROR_INVALID_ARGUMENTS; muscle_parse_acls(file, &read_perm, &write_perm, &delete_perm); mscfs_lookup_local(fs, file->id, &objectId); r = msc_create_object(card, objectId, objectSize, read_perm, write_perm, delete_perm); mscfs_clear_cache(fs); if(r >= 0) return 0; return r; } static int muscle_read_binary(sc_card_t *card, unsigned int idx, u8* buf, size_t count, unsigned long flags) { mscfs_t *fs = MUSCLE_FS(card); int r; msc_id objectId; u8* oid = objectId.id; mscfs_file_t *file; r = mscfs_check_selection(fs, -1); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); file = &fs->cache.array[fs->currentFileIndex]; objectId = file->objectId; /* memcpy(objectId.id, file->objectId.id, 4); */ if(!file->ef) { oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; } r = msc_read_object(card, objectId, idx, buf, count); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } static int muscle_update_binary(sc_card_t *card, unsigned int idx, const u8* buf, size_t count, unsigned long flags) { mscfs_t *fs = MUSCLE_FS(card); int r; mscfs_file_t *file; msc_id objectId; u8* oid = objectId.id; r = mscfs_check_selection(fs, -1); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); file = &fs->cache.array[fs->currentFileIndex]; objectId = file->objectId; /* memcpy(objectId.id, file->objectId.id, 4); */ if(!file->ef) { 
oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; } if(file->size < idx + count) { int newFileSize = idx + count; u8* buffer = malloc(newFileSize); if(buffer == NULL) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); r = msc_read_object(card, objectId, 0, buffer, file->size); /* TODO: RETRIEVE ACLS */ if(r < 0) goto update_bin_free_buffer; r = msc_delete_object(card, objectId, 0); if(r < 0) goto update_bin_free_buffer; r = msc_create_object(card, objectId, newFileSize, 0,0,0); if(r < 0) goto update_bin_free_buffer; memcpy(buffer + idx, buf, count); r = msc_update_object(card, objectId, 0, buffer, newFileSize); if(r < 0) goto update_bin_free_buffer; file->size = newFileSize; update_bin_free_buffer: free(buffer); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r); } else { r = msc_update_object(card, objectId, idx, buf, count); } /* mscfs_clear_cache(fs); */ return r; } /* TODO: Evaluate correctness */ static int muscle_delete_mscfs_file(sc_card_t *card, mscfs_file_t *file_data) { mscfs_t *fs = MUSCLE_FS(card); msc_id id = file_data->objectId; u8* oid = id.id; int r; if(!file_data->ef) { int x; mscfs_file_t *childFile; /* Delete children */ mscfs_check_cache(fs); sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "DELETING Children of: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); for(x = 0; x < fs->cache.size; x++) { msc_id objectId; childFile = &fs->cache.array[x]; objectId = childFile->objectId; if(0 == memcmp(oid + 2, objectId.id, 2)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "DELETING: %02X%02X%02X%02X\n", objectId.id[0],objectId.id[1], objectId.id[2],objectId.id[3]); r = muscle_delete_mscfs_file(card, childFile); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } } oid[0] = oid[2]; oid[1] = oid[3]; oid[2] = oid[3] = 0; /* ??? objectId = objectId >> 16; */ } if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4)) || (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) { } r = msc_delete_object(card, id, 1); /* Check if its the root... this file generally is virtual * So don't return an error if it fails */ if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4)) || (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) return 0; if(r < 0) { printf("ID: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } return 0; } static int muscle_delete_file(sc_card_t *card, const sc_path_t *path_in) { mscfs_t *fs = MUSCLE_FS(card); mscfs_file_t *file_data = NULL; int r = 0; r = mscfs_loadFileInfo(fs, path_in->value, path_in->len, &file_data, NULL); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); r = muscle_delete_mscfs_file(card, file_data); mscfs_clear_cache(fs); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); return 0; } static void muscle_load_single_acl(sc_file_t* file, int operation, unsigned short acl) { int key; /* Everybody by default.... 
*/ sc_file_add_acl_entry(file, operation, SC_AC_NONE, 0); if(acl == 0xFFFF) { sc_file_add_acl_entry(file, operation, SC_AC_NEVER, 0); return; } for(key = 0; key < 16; key++) { if(acl >> key & 1) { sc_file_add_acl_entry(file, operation, SC_AC_CHV, key); } } } static void muscle_load_file_acls(sc_file_t* file, mscfs_file_t *file_data) { muscle_load_single_acl(file, SC_AC_OP_READ, file_data->read); muscle_load_single_acl(file, SC_AC_OP_WRITE, file_data->write); muscle_load_single_acl(file, SC_AC_OP_UPDATE, file_data->write); muscle_load_single_acl(file, SC_AC_OP_DELETE, file_data->delete); } static void muscle_load_dir_acls(sc_file_t* file, mscfs_file_t *file_data) { muscle_load_single_acl(file, SC_AC_OP_SELECT, 0); muscle_load_single_acl(file, SC_AC_OP_LIST_FILES, 0); muscle_load_single_acl(file, SC_AC_OP_LOCK, 0xFFFF); muscle_load_single_acl(file, SC_AC_OP_DELETE, file_data->delete); muscle_load_single_acl(file, SC_AC_OP_CREATE, file_data->write); } /* Required type = -1 for don't care, 1 for EF, 0 for DF */ static int select_item(sc_card_t *card, const sc_path_t *path_in, sc_file_t ** file_out, int requiredType) { mscfs_t *fs = MUSCLE_FS(card); mscfs_file_t *file_data = NULL; int pathlen = path_in->len; int r = 0; int objectIndex; u8* oid; mscfs_check_cache(fs); r = mscfs_loadFileInfo(fs, path_in->value, path_in->len, &file_data, &objectIndex); if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); /* Check if its the right type */ if(requiredType >= 0 && requiredType != file_data->ef) { SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_INVALID_ARGUMENTS); } oid = file_data->objectId.id; /* Is it a file or directory */ if(file_data->ef) { fs->currentPath[0] = oid[0]; fs->currentPath[1] = oid[1]; fs->currentFile[0] = oid[2]; fs->currentFile[1] = oid[3]; } else { fs->currentPath[0] = oid[pathlen - 2]; fs->currentPath[1] = oid[pathlen - 1]; fs->currentFile[0] = 0; fs->currentFile[1] = 0; } fs->currentFileIndex = objectIndex; if(file_out) { sc_file_t *file; file = sc_file_new(); file->path = *path_in; file->size = file_data->size; file->id = (oid[2] << 8) | oid[3]; if(!file_data->ef) { file->type = SC_FILE_TYPE_DF; } else { file->type = SC_FILE_TYPE_WORKING_EF; file->ef_structure = SC_FILE_EF_TRANSPARENT; } /* Setup ACLS */ if(file_data->ef) { muscle_load_file_acls(file, file_data); } else { muscle_load_dir_acls(file, file_data); /* Setup directory acls... */ } file->magic = SC_FILE_MAGIC; *file_out = file; } return 0; } static int muscle_select_file(sc_card_t *card, const sc_path_t *path_in, sc_file_t **file_out) { int r; assert(card != NULL && path_in != NULL); switch (path_in->type) { case SC_PATH_TYPE_FILE_ID: r = select_item(card, path_in, file_out, 1); break; case SC_PATH_TYPE_DF_NAME: r = select_item(card, path_in, file_out, 0); break; case SC_PATH_TYPE_PATH: r = select_item(card, path_in, file_out, -1); break; default: SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if(r > 0) r = 0; SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r); } static int _listFile(mscfs_file_t *file, int reset, void *udata) { int next = reset ? 
0x00 : 0x01; return msc_list_objects( (sc_card_t*)udata, next, file); } static int muscle_init(sc_card_t *card) { muscle_private_t *priv; card->name = "MuscleApplet"; card->drv_data = malloc(sizeof(muscle_private_t)); if(!card->drv_data) { SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); } memset(card->drv_data, 0, sizeof(muscle_private_t)); priv = MUSCLE_DATA(card); priv->verifiedPins = 0; priv->fs = mscfs_new(); if(!priv->fs) { free(card->drv_data); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, SC_ERROR_OUT_OF_MEMORY); } priv->fs->udata = card; priv->fs->listFile = _listFile; card->cla = 0xB0; card->flags |= SC_CARD_FLAG_RNG; card->caps |= SC_CARD_CAP_RNG; /* Card type detection */ _sc_match_atr(card, muscle_atrs, &card->type); if(card->type == SC_CARD_TYPE_MUSCLE_ETOKEN_72K) { card->caps |= SC_CARD_CAP_APDU_EXT; } if(card->type == SC_CARD_TYPE_MUSCLE_JCOP241) { card->caps |= SC_CARD_CAP_APDU_EXT; } if (!(card->caps & SC_CARD_CAP_APDU_EXT)) { card->max_recv_size = 255; card->max_send_size = 255; } if(card->type == SC_CARD_TYPE_MUSCLE_JCOP242R2_NO_EXT_APDU) { /* Tyfone JCOP v242R2 card that doesn't support extended APDUs */ } /* FIXME: Card type detection */ if (1) { unsigned long flags; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_HASH_NONE; flags |= SC_ALGORITHM_ONBOARD_KEY_GEN; _sc_card_add_rsa_alg(card, 1024, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return SC_SUCCESS; } static int muscle_list_files(sc_card_t *card, u8 *buf, size_t bufLen) { muscle_private_t* priv = MUSCLE_DATA(card); mscfs_t *fs = priv->fs; int x; int count = 0; mscfs_check_cache(priv->fs); for(x = 0; x < fs->cache.size; x++) { u8* oid= fs->cache.array[x].objectId.id; sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "FILE: %02X%02X%02X%02X\n", oid[0],oid[1],oid[2],oid[3]); if(0 == memcmp(fs->currentPath, oid, 2)) { buf[0] = oid[2]; buf[1] = oid[3]; if(buf[0] == 0x00 && buf[1] == 0x00) continue; /* No directories/null names outside of root */ buf += 2; count+=2; } } return count; } static int muscle_pin_cmd(sc_card_t *card, struct sc_pin_cmd_data *cmd, int *tries_left) { muscle_private_t* priv = MUSCLE_DATA(card); const int bufferLength = MSC_MAX_PIN_COMMAND_LENGTH; u8 buffer[MSC_MAX_PIN_COMMAND_LENGTH]; switch(cmd->cmd) { case SC_PIN_CMD_VERIFY: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; int r; msc_verify_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len); cmd->apdu = &apdu; cmd->pin1.offset = 5; r = iso_ops->pin_cmd(card, cmd, tries_left); if(r >= 0) priv->verifiedPins |= (1 << cmd->pin_reference); return r; } case SC_AC_TERM: case SC_AC_PRO: case SC_AC_AUT: case SC_AC_NONE: default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n"); return SC_ERROR_NOT_SUPPORTED; } case SC_PIN_CMD_CHANGE: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; msc_change_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len, cmd->pin2.data, cmd->pin2.len); cmd->apdu = &apdu; return iso_ops->pin_cmd(card, cmd, tries_left); } case SC_AC_TERM: case SC_AC_PRO: case SC_AC_AUT: case SC_AC_NONE: default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n"); return SC_ERROR_NOT_SUPPORTED; } case SC_PIN_CMD_UNBLOCK: switch(cmd->pin_type) { case SC_AC_CHV: { sc_apdu_t apdu; msc_unblock_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len); cmd->apdu = &apdu; return iso_ops->pin_cmd(card, cmd, tries_left); 
} case SC_AC_TERM: case SC_AC_PRO: case SC_AC_AUT: case SC_AC_NONE: default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n"); return SC_ERROR_NOT_SUPPORTED; } default: sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported command\n"); return SC_ERROR_NOT_SUPPORTED; } } static int muscle_card_extract_key(sc_card_t *card, sc_cardctl_muscle_key_info_t *info) { /* CURRENTLY DONT SUPPORT EXTRACTING PRIVATE KEYS... */ switch(info->keyType) { case 1: /* RSA */ return msc_extract_rsa_public_key(card, info->keyLocation, &info->modLength, &info->modValue, &info->expLength, &info->expValue); default: return SC_ERROR_NOT_SUPPORTED; } } static int muscle_card_import_key(sc_card_t *card, sc_cardctl_muscle_key_info_t *info) { /* CURRENTLY DONT SUPPORT EXTRACTING PRIVATE KEYS... */ switch(info->keyType) { case 0x02: /* RSA_PRIVATE */ case 0x03: /* RSA_PRIVATE_CRT */ return msc_import_key(card, info->keyLocation, info); default: return SC_ERROR_NOT_SUPPORTED; } } static int muscle_card_generate_key(sc_card_t *card, sc_cardctl_muscle_gen_key_info_t *info) { return msc_generate_keypair(card, info->privateKeyLocation, info->publicKeyLocation, info->keyType, info->keySize, 0); } static int muscle_card_verified_pins(sc_card_t *card, sc_cardctl_muscle_verified_pins_info_t *info) { muscle_private_t* priv = MUSCLE_DATA(card); info->verifiedPins = priv->verifiedPins; return 0; } static int muscle_card_ctl(sc_card_t *card, unsigned long request, void *data) { switch(request) { case SC_CARDCTL_MUSCLE_GENERATE_KEY: return muscle_card_generate_key(card, (sc_cardctl_muscle_gen_key_info_t*) data); case SC_CARDCTL_MUSCLE_EXTRACT_KEY: return muscle_card_extract_key(card, (sc_cardctl_muscle_key_info_t*) data); case SC_CARDCTL_MUSCLE_IMPORT_KEY: return muscle_card_import_key(card, (sc_cardctl_muscle_key_info_t*) data); case SC_CARDCTL_MUSCLE_VERIFIED_PINS: return muscle_card_verified_pins(card, (sc_cardctl_muscle_verified_pins_info_t*) data); default: return SC_ERROR_NOT_SUPPORTED; /* Unsupported.. 
whatever it is */ } } static int muscle_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { muscle_private_t* priv = MUSCLE_DATA(card); if (env->operation != SC_SEC_OPERATION_SIGN && env->operation != SC_SEC_OPERATION_DECIPHER) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid crypto operation supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } if (env->algorithm != SC_ALGORITHM_RSA) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid crypto algorithm supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } /* ADJUST FOR PKCS1 padding support for decryption only */ if ((env->algorithm_flags & SC_ALGORITHM_RSA_PADS) || (env->algorithm_flags & SC_ALGORITHM_RSA_HASHES)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Card supports only raw RSA.\n"); return SC_ERROR_NOT_SUPPORTED; } if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { if (env->key_ref_len != 1 || (env->key_ref[0] > 0x0F)) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Invalid key reference supplied.\n"); return SC_ERROR_NOT_SUPPORTED; } priv->rsa_key_ref = env->key_ref[0]; } if (env->flags & SC_SEC_ENV_ALG_REF_PRESENT) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Algorithm reference not supported.\n"); return SC_ERROR_NOT_SUPPORTED; } /* if (env->flags & SC_SEC_ENV_FILE_REF_PRESENT) if (memcmp(env->file_ref.value, "\x00\x12", 2) != 0) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "File reference is not 0012.\n"); return SC_ERROR_NOT_SUPPORTED; } */ priv->env = *env; return 0; } static int muscle_restore_security_env(sc_card_t *card, int se_num) { muscle_private_t* priv = MUSCLE_DATA(card); memset(&priv->env, 0, sizeof(priv->env)); return 0; } static int muscle_decipher(sc_card_t * card, const u8 * crgram, size_t crgram_len, u8 * out, size_t out_len) { muscle_private_t* priv = MUSCLE_DATA(card); u8 key_id; int r; /* sanity check */ if (priv->env.operation != SC_SEC_OPERATION_DECIPHER) return SC_ERROR_INVALID_ARGUMENTS; key_id = priv->rsa_key_ref * 2; /* Private key */ if (out_len < crgram_len) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Output buffer too small"); return SC_ERROR_BUFFER_TOO_SMALL; } r = msc_compute_crypt(card, key_id, 0x00, /* RSA NO PADDING */ 0x04, /* decrypt */ crgram, out, crgram_len, out_len); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card signature failed"); return r; } static int muscle_compute_signature(sc_card_t *card, const u8 *data, size_t data_len, u8 * out, size_t outlen) { muscle_private_t* priv = MUSCLE_DATA(card); u8 key_id; int r; key_id = priv->rsa_key_ref * 2; /* Private key */ if (outlen < data_len) { sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Output buffer too small"); return SC_ERROR_BUFFER_TOO_SMALL; } r = msc_compute_crypt(card, key_id, 0x00, /* RSA NO PADDING */ 0x04, /* -- decrypt raw... 
will do what we need since signing isn't yet supported */ data, out, data_len, outlen); SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "Card signature failed"); return r; } static int muscle_get_challenge(sc_card_t *card, u8 *rnd, size_t len) { if (len == 0) return SC_SUCCESS; else { SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, msc_get_challenge(card, len, 0, NULL, rnd), "GET CHALLENGE cmd failed"); return (int) len; } } static int muscle_check_sw(sc_card_t * card, unsigned int sw1, unsigned int sw2) { if(sw1 == 0x9C) { switch(sw2) { case 0x01: /* SW_NO_MEMORY_LEFT */ return SC_ERROR_NOT_ENOUGH_MEMORY; case 0x02: /* SW_AUTH_FAILED */ return SC_ERROR_PIN_CODE_INCORRECT; case 0x03: /* SW_OPERATION_NOT_ALLOWED */ return SC_ERROR_NOT_ALLOWED; case 0x05: /* SW_UNSUPPORTED_FEATURE */ return SC_ERROR_NO_CARD_SUPPORT; case 0x06: /* SW_UNAUTHORIZED */ return SC_ERROR_SECURITY_STATUS_NOT_SATISFIED; case 0x07: /* SW_OBJECT_NOT_FOUND */ return SC_ERROR_FILE_NOT_FOUND; case 0x08: /* SW_OBJECT_EXISTS */ return SC_ERROR_FILE_ALREADY_EXISTS; case 0x09: /* SW_INCORRECT_ALG */ return SC_ERROR_INCORRECT_PARAMETERS; case 0x0B: /* SW_SIGNATURE_INVALID */ return SC_ERROR_CARD_CMD_FAILED; case 0x0C: /* SW_IDENTITY_BLOCKED */ return SC_ERROR_AUTH_METHOD_BLOCKED; case 0x0F: /* SW_INVALID_PARAMETER */ case 0x10: /* SW_INCORRECT_P1 */ case 0x11: /* SW_INCORRECT_P2 */ return SC_ERROR_INCORRECT_PARAMETERS; } } return iso_ops->check_sw(card, sw1, sw2); } static int muscle_card_reader_lock_obtained(sc_card_t *card, int was_reset) { int r = SC_SUCCESS; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (was_reset > 0) { if (msc_select_applet(card, muscleAppletId, sizeof muscleAppletId) != 1) { r = SC_ERROR_INVALID_CARD; } } LOG_FUNC_RETURN(card->ctx, r); } static struct sc_card_driver * sc_get_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; muscle_ops = *iso_drv->ops; muscle_ops.check_sw = muscle_check_sw; muscle_ops.pin_cmd = muscle_pin_cmd; muscle_ops.match_card = muscle_match_card; muscle_ops.init = muscle_init; muscle_ops.finish = muscle_finish; muscle_ops.get_challenge = muscle_get_challenge; muscle_ops.set_security_env = muscle_set_security_env; muscle_ops.restore_security_env = muscle_restore_security_env; muscle_ops.compute_signature = muscle_compute_signature; muscle_ops.decipher = muscle_decipher; muscle_ops.card_ctl = muscle_card_ctl; muscle_ops.read_binary = muscle_read_binary; muscle_ops.update_binary = muscle_update_binary; muscle_ops.create_file = muscle_create_file; muscle_ops.select_file = muscle_select_file; muscle_ops.delete_file = muscle_delete_file; muscle_ops.list_files = muscle_list_files; muscle_ops.card_reader_lock_obtained = muscle_card_reader_lock_obtained; return &muscle_drv; } struct sc_card_driver * sc_get_muscle_driver(void) { return sc_get_driver(); }
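/*
 * Editor's note (not part of the driver above): a self-contained sketch of
 * the 4-byte MuscleCard object-id packing that card-muscle.c relies on.
 * muscle_create_directory() stores a directory's 16-bit id in bytes 0-1
 * with bytes 2-3 zeroed, which is why muscle_read_binary() and
 * muscle_delete_mscfs_file() shift oid[2..3] down into oid[0..1] for
 * non-EF entries. The id 0x5015 below is purely illustrative.
 */
#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

/* Pack a directory id the way muscle_create_directory() does. */
static void pack_dir_id(u8 oid[4], unsigned id)
{
  oid[0] = (id >> 8) & 0xFF;
  oid[1] = id & 0xFF;
  oid[2] = oid[3] = 0;
}

/* Collapse an EF-style id (path bytes in 0-1, file bytes in 2-3) to its
 * directory form, mirroring the in-place shift done on non-EF entries. */
static void ef_to_dir(u8 oid[4])
{
  oid[0] = oid[2];
  oid[1] = oid[3];
  oid[2] = oid[3] = 0;
}

int main(void)
{
  u8 a[4], b[4] = { 0x3F, 0x00, 0x50, 0x15 };
  pack_dir_id(a, 0x5015);
  ef_to_dir(b); /* both now hold { 0x50, 0x15, 0x00, 0x00 } */
  printf("ids match: %d\n", memcmp(a, b, 4) == 0);
  return 0;
}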
./CrossVul/dataset_final_sorted/CWE-119/c/bad_345_2
crossvul-cpp_data_good_345_7
/* * sc.c: General functions * * Copyright (C) 2001, 2002 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <ctype.h> #include <stdlib.h> #include <string.h> #include <assert.h> #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif #ifdef ENABLE_OPENSSL #include <openssl/crypto.h> /* for OPENSSL_cleanse */ #endif #include "internal.h" #ifdef PACKAGE_VERSION static const char *sc_version = PACKAGE_VERSION; #else static const char *sc_version = "(undef)"; #endif const char *sc_get_version(void) { return sc_version; } int sc_hex_to_bin(const char *in, u8 *out, size_t *outlen) { int err = SC_SUCCESS; size_t left, count = 0, in_len; if (in == NULL || out == NULL || outlen == NULL) { return SC_ERROR_INVALID_ARGUMENTS; } left = *outlen; in_len = strlen(in); while (*in != '\0') { int byte = 0, nybbles = 2; while (nybbles-- && *in && *in != ':' && *in != ' ') { char c; byte <<= 4; c = *in++; if ('0' <= c && c <= '9') c -= '0'; else if ('a' <= c && c <= 'f') c = c - 'a' + 10; else if ('A' <= c && c <= 'F') c = c - 'A' + 10; else { err = SC_ERROR_INVALID_ARGUMENTS; goto out; } byte |= c; } /* Detect premature end of string before byte is complete */ if (in_len > 1 && *in == '\0' && nybbles >= 0) { err = SC_ERROR_INVALID_ARGUMENTS; break; } if (*in == ':' || *in == ' ') in++; if (left <= 0) { err = SC_ERROR_BUFFER_TOO_SMALL; break; } out[count++] = (u8) byte; left--; } out: *outlen = count; return err; } int sc_bin_to_hex(const u8 *in, size_t in_len, char *out, size_t out_len, int in_sep) { unsigned int n, sep_len; char *pos, *end, sep; sep = (char)in_sep; sep_len = sep > 0 ? 
1 : 0; pos = out; end = out + out_len; for (n = 0; n < in_len; n++) { if (pos + 3 + sep_len >= end) return SC_ERROR_BUFFER_TOO_SMALL; if (n && sep_len) *pos++ = sep; sprintf(pos, "%02x", in[n]); pos += 2; } *pos = '\0'; return SC_SUCCESS; } /* * Right trim all non-printable characters */ size_t sc_right_trim(u8 *buf, size_t len) { size_t i; if (!buf) return 0; if (len > 0) { for(i = len-1; i > 0; i--) { if(!isprint(buf[i])) { buf[i] = '\0'; len--; continue; } break; } } return len; } u8 *ulong2bebytes(u8 *buf, unsigned long x) { if (buf != NULL) { buf[3] = (u8) (x & 0xff); buf[2] = (u8) ((x >> 8) & 0xff); buf[1] = (u8) ((x >> 16) & 0xff); buf[0] = (u8) ((x >> 24) & 0xff); } return buf; } u8 *ushort2bebytes(u8 *buf, unsigned short x) { if (buf != NULL) { buf[1] = (u8) (x & 0xff); buf[0] = (u8) ((x >> 8) & 0xff); } return buf; } unsigned long bebytes2ulong(const u8 *buf) { if (buf == NULL) return 0UL; return (unsigned long) (buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]); } unsigned short bebytes2ushort(const u8 *buf) { if (buf == NULL) return 0U; return (unsigned short) (buf[0] << 8 | buf[1]); } unsigned short lebytes2ushort(const u8 *buf) { if (buf == NULL) return 0U; return (unsigned short)buf[1] << 8 | (unsigned short)buf[0]; } void sc_init_oid(struct sc_object_id *oid) { int ii; if (!oid) return; for (ii=0; ii<SC_MAX_OBJECT_ID_OCTETS; ii++) oid->value[ii] = -1; } int sc_format_oid(struct sc_object_id *oid, const char *in) { int ii, ret = SC_ERROR_INVALID_ARGUMENTS; const char *p; char *q; if (oid == NULL || in == NULL) return SC_ERROR_INVALID_ARGUMENTS; sc_init_oid(oid); p = in; for (ii=0; ii < SC_MAX_OBJECT_ID_OCTETS; ii++) { oid->value[ii] = strtol(p, &q, 10); if (!*q) break; if (!(q[0] == '.' && isdigit(q[1]))) goto out; p = q + 1; } if (!sc_valid_oid(oid)) goto out; ret = SC_SUCCESS; out: if (ret) sc_init_oid(oid); return ret; } int sc_compare_oid(const struct sc_object_id *oid1, const struct sc_object_id *oid2) { int i; if (oid1 == NULL || oid2 == NULL) { return SC_ERROR_INVALID_ARGUMENTS; } for (i = 0; i < SC_MAX_OBJECT_ID_OCTETS; i++) { if (oid1->value[i] != oid2->value[i]) return 0; if (oid1->value[i] == -1) break; } return 1; } int sc_valid_oid(const struct sc_object_id *oid) { int ii; if (!oid) return 0; if (oid->value[0] == -1 || oid->value[1] == -1) return 0; if (oid->value[0] > 2 || oid->value[1] > 39) return 0; for (ii=0;ii<SC_MAX_OBJECT_ID_OCTETS;ii++) if (oid->value[ii]) break; if (ii==SC_MAX_OBJECT_ID_OCTETS) return 0; return 1; } int sc_detect_card_presence(sc_reader_t *reader) { int r; LOG_FUNC_CALLED(reader->ctx); if (reader->ops->detect_card_presence == NULL) LOG_FUNC_RETURN(reader->ctx, SC_ERROR_NOT_SUPPORTED); r = reader->ops->detect_card_presence(reader); LOG_FUNC_RETURN(reader->ctx, r); } int sc_path_set(sc_path_t *path, int type, const u8 *id, size_t id_len, int idx, int count) { if (path == NULL || id == NULL || id_len == 0 || id_len > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memset(path, 0, sizeof(*path)); memcpy(path->value, id, id_len); path->len = id_len; path->type = type; path->index = idx; path->count = count; return SC_SUCCESS; } void sc_format_path(const char *str, sc_path_t *path) { int type = SC_PATH_TYPE_PATH; if (path) { memset(path, 0, sizeof(*path)); if (*str == 'i' || *str == 'I') { type = SC_PATH_TYPE_FILE_ID; str++; } path->len = sizeof(path->value); if (sc_hex_to_bin(str, path->value, &path->len) >= 0) { path->type = type; } path->count = -1; } } int sc_append_path(sc_path_t *dest, const sc_path_t *src) { return 
sc_concatenate_path(dest, dest, src); } int sc_append_path_id(sc_path_t *dest, const u8 *id, size_t idlen) { if (dest->len + idlen > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memcpy(dest->value + dest->len, id, idlen); dest->len += idlen; return SC_SUCCESS; } int sc_append_file_id(sc_path_t *dest, unsigned int fid) { u8 id[2] = { fid >> 8, fid & 0xff }; return sc_append_path_id(dest, id, 2); } int sc_concatenate_path(sc_path_t *d, const sc_path_t *p1, const sc_path_t *p2) { sc_path_t tpath; if (d == NULL || p1 == NULL || p2 == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (p1->type == SC_PATH_TYPE_DF_NAME || p2->type == SC_PATH_TYPE_DF_NAME) /* we do not support concatenation of AIDs at the moment */ return SC_ERROR_NOT_SUPPORTED; if (p1->len + p2->len > SC_MAX_PATH_SIZE) return SC_ERROR_INVALID_ARGUMENTS; memset(&tpath, 0, sizeof(sc_path_t)); memcpy(tpath.value, p1->value, p1->len); memcpy(tpath.value + p1->len, p2->value, p2->len); tpath.len = p1->len + p2->len; tpath.type = SC_PATH_TYPE_PATH; /* use 'index' and 'count' entry of the second path object */ tpath.index = p2->index; tpath.count = p2->count; /* the result is currently always as path */ tpath.type = SC_PATH_TYPE_PATH; *d = tpath; return SC_SUCCESS; } const char *sc_print_path(const sc_path_t *path) { static char buffer[SC_MAX_PATH_STRING_SIZE + SC_MAX_AID_STRING_SIZE]; if (sc_path_print(buffer, sizeof(buffer), path) != SC_SUCCESS) buffer[0] = '\0'; return buffer; } int sc_path_print(char *buf, size_t buflen, const sc_path_t *path) { size_t i; if (buf == NULL || path == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (buflen < path->len * 2 + path->aid.len * 2 + 1) return SC_ERROR_BUFFER_TOO_SMALL; buf[0] = '\0'; if (path->aid.len) { for (i = 0; i < path->aid.len; i++) snprintf(buf + strlen(buf), buflen - strlen(buf), "%02x", path->aid.value[i]); snprintf(buf + strlen(buf), buflen - strlen(buf), "::"); } for (i = 0; i < path->len; i++) snprintf(buf + strlen(buf), buflen - strlen(buf), "%02x", path->value[i]); if (!path->aid.len && path->type == SC_PATH_TYPE_DF_NAME) snprintf(buf + strlen(buf), buflen - strlen(buf), "::"); return SC_SUCCESS; } int sc_compare_path(const sc_path_t *path1, const sc_path_t *path2) { return path1->len == path2->len && !memcmp(path1->value, path2->value, path1->len); } int sc_compare_path_prefix(const sc_path_t *prefix, const sc_path_t *path) { sc_path_t tpath; if (prefix->len > path->len) return 0; tpath = *path; tpath.len = prefix->len; return sc_compare_path(&tpath, prefix); } const sc_path_t *sc_get_mf_path(void) { static const sc_path_t mf_path = { {0x3f, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 0, 0, SC_PATH_TYPE_PATH, {{0},0} }; return &mf_path; } int sc_file_add_acl_entry(sc_file_t *file, unsigned int operation, unsigned int method, unsigned long key_ref) { sc_acl_entry_t *p, *_new; if (file == NULL || operation >= SC_MAX_AC_OPS) { return SC_ERROR_INVALID_ARGUMENTS; } switch (method) { case SC_AC_NEVER: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 1; return SC_SUCCESS; case SC_AC_NONE: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 2; return SC_SUCCESS; case SC_AC_UNKNOWN: sc_file_clear_acl_entries(file, operation); file->acl[operation] = (sc_acl_entry_t *) 3; return SC_SUCCESS; default: /* NONE and UNKNOWN get zapped when a new AC is added. * If the ACL is NEVER, additional entries will be * dropped silently. 
*/ if (file->acl[operation] == (sc_acl_entry_t *) 1) return SC_SUCCESS; if (file->acl[operation] == (sc_acl_entry_t *) 2 || file->acl[operation] == (sc_acl_entry_t *) 3) file->acl[operation] = NULL; } /* If the entry is already present (e.g. due to the mapping) * of the card's AC with OpenSC's), don't add it again. */ for (p = file->acl[operation]; p != NULL; p = p->next) { if ((p->method == method) && (p->key_ref == key_ref)) return SC_SUCCESS; } _new = malloc(sizeof(sc_acl_entry_t)); if (_new == NULL) return SC_ERROR_OUT_OF_MEMORY; _new->method = method; _new->key_ref = key_ref; _new->next = NULL; p = file->acl[operation]; if (p == NULL) { file->acl[operation] = _new; return SC_SUCCESS; } while (p->next != NULL) p = p->next; p->next = _new; return SC_SUCCESS; } const sc_acl_entry_t * sc_file_get_acl_entry(const sc_file_t *file, unsigned int operation) { sc_acl_entry_t *p; static const sc_acl_entry_t e_never = { SC_AC_NEVER, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; static const sc_acl_entry_t e_none = { SC_AC_NONE, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; static const sc_acl_entry_t e_unknown = { SC_AC_UNKNOWN, SC_AC_KEY_REF_NONE, {{0, 0, 0, {0}}}, NULL }; if (file == NULL || operation >= SC_MAX_AC_OPS) { return NULL; } p = file->acl[operation]; if (p == (sc_acl_entry_t *) 1) return &e_never; if (p == (sc_acl_entry_t *) 2) return &e_none; if (p == (sc_acl_entry_t *) 3) return &e_unknown; return file->acl[operation]; } void sc_file_clear_acl_entries(sc_file_t *file, unsigned int operation) { sc_acl_entry_t *e; if (file == NULL || operation >= SC_MAX_AC_OPS) { return; } e = file->acl[operation]; if (e == (sc_acl_entry_t *) 1 || e == (sc_acl_entry_t *) 2 || e == (sc_acl_entry_t *) 3) { file->acl[operation] = NULL; return; } while (e != NULL) { sc_acl_entry_t *tmp = e->next; free(e); e = tmp; } file->acl[operation] = NULL; } sc_file_t * sc_file_new(void) { sc_file_t *file = (sc_file_t *)calloc(1, sizeof(sc_file_t)); if (file == NULL) return NULL; file->magic = SC_FILE_MAGIC; return file; } void sc_file_free(sc_file_t *file) { unsigned int i; if (file == NULL || !sc_file_valid(file)) return; file->magic = 0; for (i = 0; i < SC_MAX_AC_OPS; i++) sc_file_clear_acl_entries(file, i); if (file->sec_attr) free(file->sec_attr); if (file->prop_attr) free(file->prop_attr); if (file->type_attr) free(file->type_attr); if (file->encoded_content) free(file->encoded_content); free(file); } void sc_file_dup(sc_file_t **dest, const sc_file_t *src) { sc_file_t *newf; const sc_acl_entry_t *e; unsigned int op; *dest = NULL; if (!sc_file_valid(src)) return; newf = sc_file_new(); if (newf == NULL) return; *dest = newf; memcpy(&newf->path, &src->path, sizeof(struct sc_path)); memcpy(&newf->name, &src->name, sizeof(src->name)); newf->namelen = src->namelen; newf->type = src->type; newf->shareable = src->shareable; newf->ef_structure = src->ef_structure; newf->size = src->size; newf->id = src->id; newf->status = src->status; for (op = 0; op < SC_MAX_AC_OPS; op++) { newf->acl[op] = NULL; e = sc_file_get_acl_entry(src, op); if (e != NULL) { if (sc_file_add_acl_entry(newf, op, e->method, e->key_ref) < 0) goto err; } } newf->record_length = src->record_length; newf->record_count = src->record_count; if (sc_file_set_sec_attr(newf, src->sec_attr, src->sec_attr_len) < 0) goto err; if (sc_file_set_prop_attr(newf, src->prop_attr, src->prop_attr_len) < 0) goto err; if (sc_file_set_type_attr(newf, src->type_attr, src->type_attr_len) < 0) goto err; if (sc_file_set_content(newf, src->encoded_content, 
src->encoded_content_len) < 0)
		goto err;
	return;

err:
	sc_file_free(newf);
	*dest = NULL;
}

int sc_file_set_sec_attr(sc_file_t *file, const u8 *sec_attr,
			 size_t sec_attr_len)
{
	u8 *tmp;

	if (!sc_file_valid(file)) {
		return SC_ERROR_INVALID_ARGUMENTS;
	}

	/* A NULL or empty attribute simply clears any stored value. */
	if (sec_attr == NULL || sec_attr_len == 0) {
		if (file->sec_attr != NULL)
			free(file->sec_attr);
		file->sec_attr = NULL;
		file->sec_attr_len = 0;
		return SC_SUCCESS;
	}

	tmp = (u8 *) realloc(file->sec_attr, sec_attr_len);
	if (!tmp) {
		if (file->sec_attr)
			free(file->sec_attr);
		file->sec_attr = NULL;
		file->sec_attr_len = 0;
		return SC_ERROR_OUT_OF_MEMORY;
	}
	file->sec_attr = tmp;
	memcpy(file->sec_attr, sec_attr, sec_attr_len);
	file->sec_attr_len = sec_attr_len;

	return SC_SUCCESS;
}

int sc_file_set_prop_attr(sc_file_t *file, const u8 *prop_attr,
			  size_t prop_attr_len)
{
	u8 *tmp;

	if (!sc_file_valid(file)) {
		return SC_ERROR_INVALID_ARGUMENTS;
	}

	if (prop_attr == NULL) {
		if (file->prop_attr != NULL)
			free(file->prop_attr);
		file->prop_attr = NULL;
		file->prop_attr_len = 0;
		return SC_SUCCESS;
	}

	tmp = (u8 *) realloc(file->prop_attr, prop_attr_len);
	if (!tmp) {
		if (file->prop_attr)
			free(file->prop_attr);
		file->prop_attr = NULL;
		file->prop_attr_len = 0;
		return SC_ERROR_OUT_OF_MEMORY;
	}
	file->prop_attr = tmp;
	memcpy(file->prop_attr, prop_attr, prop_attr_len);
	file->prop_attr_len = prop_attr_len;

	return SC_SUCCESS;
}

int sc_file_set_type_attr(sc_file_t *file, const u8 *type_attr,
			  size_t type_attr_len)
{
	u8 *tmp;

	if (!sc_file_valid(file)) {
		return SC_ERROR_INVALID_ARGUMENTS;
	}

	if (type_attr == NULL) {
		if (file->type_attr != NULL)
			free(file->type_attr);
		file->type_attr = NULL;
		file->type_attr_len = 0;
		return SC_SUCCESS;
	}

	tmp = (u8 *) realloc(file->type_attr, type_attr_len);
	if (!tmp) {
		if (file->type_attr)
			free(file->type_attr);
		file->type_attr = NULL;
		file->type_attr_len = 0;
		return SC_ERROR_OUT_OF_MEMORY;
	}
	file->type_attr = tmp;
	memcpy(file->type_attr, type_attr, type_attr_len);
	file->type_attr_len = type_attr_len;

	return SC_SUCCESS;
}

int sc_file_set_content(sc_file_t *file, const u8 *content,
			size_t content_len)
{
	u8 *tmp;

	if (!sc_file_valid(file)) {
		return SC_ERROR_INVALID_ARGUMENTS;
	}

	if (content == NULL) {
		if (file->encoded_content != NULL)
			free(file->encoded_content);
		file->encoded_content = NULL;
		file->encoded_content_len = 0;
		return SC_SUCCESS;
	}

	tmp = (u8 *) realloc(file->encoded_content, content_len);
	if (!tmp) {
		if (file->encoded_content)
			free(file->encoded_content);
		file->encoded_content = NULL;
		file->encoded_content_len = 0;
		return SC_ERROR_OUT_OF_MEMORY;
	}
	file->encoded_content = tmp;
	memcpy(file->encoded_content, content, content_len);
	file->encoded_content_len = content_len;

	return SC_SUCCESS;
}

int sc_file_valid(const sc_file_t *file)
{
	if (file == NULL)
		return 0;
	return file->magic == SC_FILE_MAGIC;
}

int _sc_parse_atr(sc_reader_t *reader)
{
	u8 *p = reader->atr.value;
	int atr_len = (int) reader->atr.len;
	int n_hist, x;
	int tx[4] = {-1, -1, -1, -1};
	int i, FI, DI;
	const int Fi_table[] = {
		372, 372, 558, 744, 1116, 1488, 1860, -1,
		-1, 512, 768, 1024, 1536, 2048, -1, -1 };
	const int f_table[] = {
		40, 50, 60, 80, 120, 160, 200, -1,
		-1, 50, 75, 100, 150, 200, -1, -1 };
	const int Di_table[] = {
		-1, 1, 2, 4, 8, 16, 32, -1,
		12, 20, -1, -1, -1, -1, -1, -1 };

	reader->atr_info.hist_bytes_len = 0;
	reader->atr_info.hist_bytes = NULL;

	if (atr_len == 0) {
		sc_log(reader->ctx, "empty ATR - card not present?\n");
		return SC_ERROR_INTERNAL;
	}

	if (p[0] != 0x3B && p[0] != 0x3F) {
		sc_log(reader->ctx, "invalid sync byte in ATR: 0x%02X\n", p[0]);
		return SC_ERROR_INTERNAL;
	}
	n_hist = p[1] & 0x0F;
	x =
p[1] >> 4; p += 2; atr_len -= 2; for (i = 0; i < 4 && atr_len > 0; i++) { if (x & (1 << i)) { tx[i] = *p; p++; atr_len--; } else tx[i] = -1; } if (tx[0] >= 0) { reader->atr_info.FI = FI = tx[0] >> 4; reader->atr_info.DI = DI = tx[0] & 0x0F; reader->atr_info.Fi = Fi_table[FI]; reader->atr_info.f = f_table[FI]; reader->atr_info.Di = Di_table[DI]; } else { reader->atr_info.Fi = -1; reader->atr_info.f = -1; reader->atr_info.Di = -1; } if (tx[2] >= 0) reader->atr_info.N = tx[3]; else reader->atr_info.N = -1; while (tx[3] > 0 && tx[3] & 0xF0 && atr_len > 0) { x = tx[3] >> 4; for (i = 0; i < 4 && atr_len > 0; i++) { if (x & (1 << i)) { tx[i] = *p; p++; atr_len--; } else tx[i] = -1; } } if (atr_len <= 0) return SC_SUCCESS; if (n_hist > atr_len) n_hist = atr_len; reader->atr_info.hist_bytes_len = n_hist; reader->atr_info.hist_bytes = p; return SC_SUCCESS; } void sc_mem_clear(void *ptr, size_t len) { if (len > 0) { #ifdef ENABLE_OPENSSL OPENSSL_cleanse(ptr, len); #else memset(ptr, 0, len); #endif } } int sc_mem_reverse(unsigned char *buf, size_t len) { unsigned char ch; size_t ii; if (!buf || !len) return SC_ERROR_INVALID_ARGUMENTS; for (ii = 0; ii < len / 2; ii++) { ch = *(buf + ii); *(buf + ii) = *(buf + len - 1 - ii); *(buf + len - 1 - ii) = ch; } return SC_SUCCESS; } static int sc_remote_apdu_allocate(struct sc_remote_data *rdata, struct sc_remote_apdu **new_rapdu) { struct sc_remote_apdu *rapdu = NULL, *rr; if (!rdata) return SC_ERROR_INVALID_ARGUMENTS; rapdu = calloc(1, sizeof(struct sc_remote_apdu)); if (rapdu == NULL) return SC_ERROR_OUT_OF_MEMORY; rapdu->apdu.data = &rapdu->sbuf[0]; rapdu->apdu.resp = &rapdu->rbuf[0]; rapdu->apdu.resplen = sizeof(rapdu->rbuf); if (new_rapdu) *new_rapdu = rapdu; if (rdata->data == NULL) { rdata->data = rapdu; rdata->length = 1; return SC_SUCCESS; } for (rr = rdata->data; rr->next; rr = rr->next) ; rr->next = rapdu; rdata->length++; return SC_SUCCESS; } static void sc_remote_apdu_free (struct sc_remote_data *rdata) { struct sc_remote_apdu *rapdu = NULL; if (!rdata) return; rapdu = rdata->data; while(rapdu) { struct sc_remote_apdu *rr = rapdu->next; free(rapdu); rapdu = rr; } } void sc_remote_data_init(struct sc_remote_data *rdata) { if (!rdata) return; memset(rdata, 0, sizeof(struct sc_remote_data)); rdata->alloc = sc_remote_apdu_allocate; rdata->free = sc_remote_apdu_free; } static unsigned long sc_CRC_tab32[256]; static int sc_CRC_tab32_initialized = 0; unsigned sc_crc32(const unsigned char *value, size_t len) { size_t ii, jj; unsigned long crc; unsigned long index, long_c; if (!sc_CRC_tab32_initialized) { for (ii=0; ii<256; ii++) { crc = (unsigned long) ii; for (jj=0; jj<8; jj++) { if ( crc & 0x00000001L ) crc = ( crc >> 1 ) ^ 0xEDB88320l; else crc = crc >> 1; } sc_CRC_tab32[ii] = crc; } sc_CRC_tab32_initialized = 1; } crc = 0xffffffffL; for (ii=0; ii<len; ii++) { long_c = 0x000000ffL & (unsigned long) (*(value + ii)); index = crc ^ long_c; crc = (crc >> 8) ^ sc_CRC_tab32[ index & 0xff ]; } crc ^= 0xffffffff; return crc%0xffff; } const u8 *sc_compacttlv_find_tag(const u8 *buf, size_t len, u8 tag, size_t *outlen) { if (buf != NULL) { size_t idx; u8 plain_tag = tag & 0xF0; size_t expected_len = tag & 0x0F; for (idx = 0; idx < len; idx++) { if ((buf[idx] & 0xF0) == plain_tag && idx + expected_len < len && (expected_len == 0 || expected_len == (buf[idx] & 0x0F))) { if (outlen != NULL) *outlen = buf[idx] & 0x0F; return buf + (idx + 1); } idx += (buf[idx] & 0x0F); } } return NULL; } /**************************** mutex functions ************************/ int 
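/*
 * Each wrapper below forwards to the optional sc_thread_context_t
 * installed by the application. When no thread context is configured,
 * or the corresponding callback is unset, the call is a successful
 * no-op returning SC_SUCCESS.
 */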
sc_mutex_create(const sc_context_t *ctx, void **mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->create_mutex != NULL) return ctx->thread_ctx->create_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_lock(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->lock_mutex != NULL) return ctx->thread_ctx->lock_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_unlock(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->unlock_mutex != NULL) return ctx->thread_ctx->unlock_mutex(mutex); else return SC_SUCCESS; } int sc_mutex_destroy(const sc_context_t *ctx, void *mutex) { if (ctx == NULL) return SC_ERROR_INVALID_ARGUMENTS; if (ctx->thread_ctx != NULL && ctx->thread_ctx->destroy_mutex != NULL) return ctx->thread_ctx->destroy_mutex(mutex); else return SC_SUCCESS; } unsigned long sc_thread_id(const sc_context_t *ctx) { if (ctx == NULL || ctx->thread_ctx == NULL || ctx->thread_ctx->thread_id == NULL) return 0UL; else return ctx->thread_ctx->thread_id(); }
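/*
 * A minimal usage sketch of the path helpers above -- not part of the
 * library itself. The function name example_build_path, the guard macro
 * SC_PATH_USAGE_SKETCH and the file ids 0x5015/0x4401 are hypothetical,
 * chosen only to show how sc_get_mf_path(), sc_append_file_id(),
 * sc_print_path() and sc_compare_path_prefix() compose.
 */
#ifdef SC_PATH_USAGE_SKETCH
#include <assert.h>
#include <stdio.h>

static void example_build_path(void)
{
	/* Start from the static MF path (3f00); each append is
	 * bounds-checked against SC_MAX_PATH_SIZE. */
	sc_path_t path = *sc_get_mf_path();

	if (sc_append_file_id(&path, 0x5015) == SC_SUCCESS &&
	    sc_append_file_id(&path, 0x4401) == SC_SUCCESS)
		printf("%s\n", sc_print_path(&path)); /* prints "3f0050154401" */

	/* The MF path remains a prefix of the longer path. */
	assert(sc_compare_path_prefix(sc_get_mf_path(), &path));
}
#endif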
/* ==== end of source file (dataset path: ./CrossVul/dataset_final_sorted/CWE-119/c/good_345_7) ==== */

/* ==== dataset record: crossvul-cpp_data_good_2384_0 ==== */
/* ** Copyright (C) 2001-2014 Erik de Castro Lopo <erikd@mega-nerd.com> ** Copyright (C) 2004 Paavo Jumppanen ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation; either version 2.1 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* ** The sd2 support implemented in this file was partially sponsored ** (financially) by Paavo Jumppanen. */ /* ** Documentation on the Mac resource fork was obtained here : ** http://developer.apple.com/documentation/mac/MoreToolbox/MoreToolbox-99.html */ #include "sfconfig.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "sndfile.h" #include "sfendian.h" #include "common.h" /*------------------------------------------------------------------------------ * Markers. */ #define Sd2f_MARKER MAKE_MARKER ('S', 'd', '2', 'f') #define Sd2a_MARKER MAKE_MARKER ('S', 'd', '2', 'a') #define ALCH_MARKER MAKE_MARKER ('A', 'L', 'C', 'H') #define lsf1_MARKER MAKE_MARKER ('l', 's', 'f', '1') #define STR_MARKER MAKE_MARKER ('S', 'T', 'R', ' ') #define sdML_MARKER MAKE_MARKER ('s', 'd', 'M', 'L') enum { RSRC_STR = 111, RSRC_BIN } ; typedef struct { unsigned char * rsrc_data ; int rsrc_len ; int need_to_free_rsrc_data ; int data_offset, data_length ; int map_offset, map_length ; int type_count, type_offset ; int item_offset ; int str_index, str_count ; int string_offset ; /* All the above just to get these three. */ int sample_size, sample_rate, channels ; } SD2_RSRC ; typedef struct { int type ; int id ; char name [32] ; char value [32] ; int value_len ; } STR_RSRC ; /*------------------------------------------------------------------------------ * Private static functions. */ static int sd2_close (SF_PRIVATE *psf) ; static int sd2_parse_rsrc_fork (SF_PRIVATE *psf) ; static int parse_str_rsrc (SF_PRIVATE *psf, SD2_RSRC * rsrc) ; static int sd2_write_rsrc_fork (SF_PRIVATE *psf, int calc_length) ; /*------------------------------------------------------------------------------ ** Public functions. */ int sd2_open (SF_PRIVATE *psf) { int subformat, error = 0, valid ; /* SD2 is always big endian. */ psf->endian = SF_ENDIAN_BIG ; if (psf->file.mode == SFM_READ || (psf->file.mode == SFM_RDWR && psf->rsrclength > 0)) { psf_use_rsrc (psf, SF_TRUE) ; valid = psf_file_valid (psf) ; psf_use_rsrc (psf, SF_FALSE) ; if (! valid) { psf_log_printf (psf, "sd2_open : psf->rsrc.filedes < 0\n") ; return SFE_SD2_BAD_RSRC ; } ; error = sd2_parse_rsrc_fork (psf) ; if (error) goto error_cleanup ; } ; if ((SF_CONTAINER (psf->sf.format)) != SF_FORMAT_SD2) { error = SFE_BAD_OPEN_FORMAT ; goto error_cleanup ; } ; subformat = SF_CODEC (psf->sf.format) ; psf->dataoffset = 0 ; /* Only open and write the resource in RDWR mode is its current length is zero. 
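	** That is: a fresh resource fork is written unconditionally for
	** SFM_WRITE, and for SFM_RDWR only when no resource fork data was
	** found on open.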
*/ if (psf->file.mode == SFM_WRITE || (psf->file.mode == SFM_RDWR && psf->rsrclength == 0)) { psf->rsrc.mode = psf->file.mode ; psf_open_rsrc (psf) ; error = sd2_write_rsrc_fork (psf, SF_FALSE) ; if (error) goto error_cleanup ; /* Not needed. */ psf->write_header = NULL ; } ; psf->container_close = sd2_close ; psf->blockwidth = psf->bytewidth * psf->sf.channels ; switch (subformat) { case SF_FORMAT_PCM_S8 : /* 8-bit linear PCM. */ case SF_FORMAT_PCM_16 : /* 16-bit linear PCM. */ case SF_FORMAT_PCM_24 : /* 24-bit linear PCM */ case SF_FORMAT_PCM_32 : /* 32-bit linear PCM */ error = pcm_init (psf) ; break ; default : error = SFE_UNIMPLEMENTED ; break ; } ; psf_fseek (psf, psf->dataoffset, SEEK_SET) ; error_cleanup: /* Close the resource fork regardless. We won't need it again. */ psf_close_rsrc (psf) ; return error ; } /* sd2_open */ /*------------------------------------------------------------------------------ */ static int sd2_close (SF_PRIVATE *psf) { if (psf->file.mode == SFM_WRITE) { /* Now we know for certain the audio_length of the file we can re-write ** correct values for the FORM, 8SVX and BODY chunks. */ } ; return 0 ; } /* sd2_close */ /*------------------------------------------------------------------------------ */ static inline void write_char (unsigned char * data, int offset, char value) { data [offset] = value ; } /* write_char */ static inline void write_short (unsigned char * data, int offset, short value) { data [offset] = value >> 8 ; data [offset + 1] = value ; } /* write_char */ static inline void write_int (unsigned char * data, int offset, int value) { data [offset] = value >> 24 ; data [offset + 1] = value >> 16 ; data [offset + 2] = value >> 8 ; data [offset + 3] = value ; } /* write_int */ static inline void write_marker (unsigned char * data, int offset, int value) { if (CPU_IS_BIG_ENDIAN) { data [offset] = value >> 24 ; data [offset + 1] = value >> 16 ; data [offset + 2] = value >> 8 ; data [offset + 3] = value ; } else { data [offset] = value ; data [offset + 1] = value >> 8 ; data [offset + 2] = value >> 16 ; data [offset + 3] = value >> 24 ; } ; } /* write_marker */ static void write_str (unsigned char * data, int offset, const char * buffer, int buffer_len) { memcpy (data + offset, buffer, buffer_len) ; } /* write_str */ static int sd2_write_rsrc_fork (SF_PRIVATE *psf, int UNUSED (calc_length)) { SD2_RSRC rsrc ; STR_RSRC str_rsrc [] = { { RSRC_STR, 1000, "_sample-size", "", 0 }, { RSRC_STR, 1001, "_sample-rate", "", 0 }, { RSRC_STR, 1002, "_channels", "", 0 }, { RSRC_BIN, 1000, "_Markers", "", 8 } } ; int k, str_offset, data_offset, next_str ; psf_use_rsrc (psf, SF_TRUE) ; memset (&rsrc, 0, sizeof (rsrc)) ; rsrc.sample_rate = psf->sf.samplerate ; rsrc.sample_size = psf->bytewidth ; rsrc.channels = psf->sf.channels ; rsrc.rsrc_data = psf->header ; rsrc.rsrc_len = sizeof (psf->header) ; memset (rsrc.rsrc_data, 0xea, rsrc.rsrc_len) ; snprintf (str_rsrc [0].value, sizeof (str_rsrc [0].value), "_%d", rsrc.sample_size) ; snprintf (str_rsrc [1].value, sizeof (str_rsrc [1].value), "_%d.000000", rsrc.sample_rate) ; snprintf (str_rsrc [2].value, sizeof (str_rsrc [2].value), "_%d", rsrc.channels) ; for (k = 0 ; k < ARRAY_LEN (str_rsrc) ; k++) { if (str_rsrc [k].value_len == 0) { str_rsrc [k].value_len = strlen (str_rsrc [k].value) ; str_rsrc [k].value [0] = str_rsrc [k].value_len - 1 ; } ; /* Turn name string into a pascal string. 
*/ str_rsrc [k].name [0] = strlen (str_rsrc [k].name) - 1 ; } ; rsrc.data_offset = 0x100 ; /* ** Calculate data length : ** length of strings, plus the length of the sdML chunk. */ rsrc.data_length = 0 ; for (k = 0 ; k < ARRAY_LEN (str_rsrc) ; k++) rsrc.data_length += str_rsrc [k].value_len + 4 ; rsrc.map_offset = rsrc.data_offset + rsrc.data_length ; /* Very start of resource fork. */ write_int (rsrc.rsrc_data, 0, rsrc.data_offset) ; write_int (rsrc.rsrc_data, 4, rsrc.map_offset) ; write_int (rsrc.rsrc_data, 8, rsrc.data_length) ; write_char (rsrc.rsrc_data, 0x30, strlen (psf->file.name.c)) ; write_str (rsrc.rsrc_data, 0x31, psf->file.name.c, strlen (psf->file.name.c)) ; write_short (rsrc.rsrc_data, 0x50, 0) ; write_marker (rsrc.rsrc_data, 0x52, Sd2f_MARKER) ; write_marker (rsrc.rsrc_data, 0x56, lsf1_MARKER) ; /* Very start of resource map. */ write_int (rsrc.rsrc_data, rsrc.map_offset + 0, rsrc.data_offset) ; write_int (rsrc.rsrc_data, rsrc.map_offset + 4, rsrc.map_offset) ; write_int (rsrc.rsrc_data, rsrc.map_offset + 8, rsrc.data_length) ; /* These I don't currently understand. */ if (1) { write_char (rsrc.rsrc_data, rsrc.map_offset+ 16, 1) ; /* Next resource map. */ write_int (rsrc.rsrc_data, rsrc.map_offset + 17, 0x12345678) ; /* File ref number. */ write_short (rsrc.rsrc_data, rsrc.map_offset + 21, 0xabcd) ; /* Fork attributes. */ write_short (rsrc.rsrc_data, rsrc.map_offset + 23, 0) ; } ; /* Resource type offset. */ rsrc.type_offset = rsrc.map_offset + 30 ; write_short (rsrc.rsrc_data, rsrc.map_offset + 24, rsrc.type_offset - rsrc.map_offset - 2) ; /* Type index max. */ rsrc.type_count = 2 ; write_short (rsrc.rsrc_data, rsrc.map_offset + 28, rsrc.type_count - 1) ; rsrc.item_offset = rsrc.type_offset + rsrc.type_count * 8 ; rsrc.str_count = ARRAY_LEN (str_rsrc) ; rsrc.string_offset = rsrc.item_offset + (rsrc.str_count + 1) * 12 - rsrc.map_offset ; write_short (rsrc.rsrc_data, rsrc.map_offset + 26, rsrc.string_offset) ; /* Write 'STR ' resource type. */ rsrc.str_count = 3 ; write_marker (rsrc.rsrc_data, rsrc.type_offset, STR_MARKER) ; write_short (rsrc.rsrc_data, rsrc.type_offset + 4, rsrc.str_count - 1) ; write_short (rsrc.rsrc_data, rsrc.type_offset + 6, 0x12) ; /* Write 'sdML' resource type. */ write_marker (rsrc.rsrc_data, rsrc.type_offset + 8, sdML_MARKER) ; write_short (rsrc.rsrc_data, rsrc.type_offset + 12, 0) ; write_short (rsrc.rsrc_data, rsrc.type_offset + 14, 0x36) ; str_offset = rsrc.map_offset + rsrc.string_offset ; next_str = 0 ; data_offset = rsrc.data_offset ; for (k = 0 ; k < ARRAY_LEN (str_rsrc) ; k++) { write_str (rsrc.rsrc_data, str_offset, str_rsrc [k].name, strlen (str_rsrc [k].name)) ; write_short (rsrc.rsrc_data, rsrc.item_offset + k * 12, str_rsrc [k].id) ; write_short (rsrc.rsrc_data, rsrc.item_offset + k * 12 + 2, next_str) ; str_offset += strlen (str_rsrc [k].name) ; next_str += strlen (str_rsrc [k].name) ; write_int (rsrc.rsrc_data, rsrc.item_offset + k * 12 + 4, data_offset - rsrc.data_offset) ; write_int (rsrc.rsrc_data, data_offset, str_rsrc [k].value_len) ; write_str (rsrc.rsrc_data, data_offset + 4, str_rsrc [k].value, str_rsrc [k].value_len) ; data_offset += 4 + str_rsrc [k].value_len ; } ; /* Finally, calculate and set map length. 
*/ rsrc.map_length = str_offset - rsrc.map_offset ; write_int (rsrc.rsrc_data, 12, rsrc.map_length) ; write_int (rsrc.rsrc_data, rsrc.map_offset + 12, rsrc.map_length) ; rsrc.rsrc_len = rsrc.map_offset + rsrc.map_length ; psf_fwrite (rsrc.rsrc_data, rsrc.rsrc_len, 1, psf) ; psf_use_rsrc (psf, SF_FALSE) ; if (psf->error) return psf->error ; return 0 ; } /* sd2_write_rsrc_fork */ /*------------------------------------------------------------------------------ */ static inline int read_rsrc_char (const SD2_RSRC *prsrc, int offset) { const unsigned char * data = prsrc->rsrc_data ; if (offset < 0 || offset >= prsrc->rsrc_len) return 0 ; return data [offset] ; } /* read_rsrc_char */ static inline int read_rsrc_short (const SD2_RSRC *prsrc, int offset) { const unsigned char * data = prsrc->rsrc_data ; if (offset < 0 || offset + 1 >= prsrc->rsrc_len) return 0 ; return (data [offset] << 8) + data [offset + 1] ; } /* read_rsrc_short */ static inline int read_rsrc_int (const SD2_RSRC *prsrc, int offset) { const unsigned char * data = prsrc->rsrc_data ; if (offset < 0 || offset + 3 >= prsrc->rsrc_len) return 0 ; return (((uint32_t) data [offset]) << 24) + (data [offset + 1] << 16) + (data [offset + 2] << 8) + data [offset + 3] ; } /* read_rsrc_int */ static inline int read_rsrc_marker (const SD2_RSRC *prsrc, int offset) { const unsigned char * data = prsrc->rsrc_data ; if (offset < 0 || offset + 3 >= prsrc->rsrc_len) return 0 ; if (CPU_IS_BIG_ENDIAN) return (((uint32_t) data [offset]) << 24) + (data [offset + 1] << 16) + (data [offset + 2] << 8) + data [offset + 3] ; if (CPU_IS_LITTLE_ENDIAN) return data [offset] + (data [offset + 1] << 8) + (data [offset + 2] << 16) + (((uint32_t) data [offset + 3]) << 24) ; return 0 ; } /* read_rsrc_marker */ static void read_rsrc_str (const SD2_RSRC *prsrc, int offset, char * buffer, int buffer_len) { const unsigned char * data = prsrc->rsrc_data ; int k ; memset (buffer, 0, buffer_len) ; if (offset < 0 || offset + buffer_len >= prsrc->rsrc_len) return ; for (k = 0 ; k < buffer_len - 1 ; k++) { if (psf_isprint (data [offset + k]) == 0) return ; buffer [k] = data [offset + k] ; } ; return ; } /* read_rsrc_str */ static int sd2_parse_rsrc_fork (SF_PRIVATE *psf) { SD2_RSRC rsrc ; int k, marker, error = 0 ; psf_use_rsrc (psf, SF_TRUE) ; memset (&rsrc, 0, sizeof (rsrc)) ; rsrc.rsrc_len = psf_get_filelen (psf) ; psf_log_printf (psf, "Resource length : %d (0x%04X)\n", rsrc.rsrc_len, rsrc.rsrc_len) ; if (rsrc.rsrc_len > SIGNED_SIZEOF (psf->header)) { rsrc.rsrc_data = calloc (1, rsrc.rsrc_len) ; rsrc.need_to_free_rsrc_data = SF_TRUE ; } else { rsrc.rsrc_data = psf->header ; rsrc.need_to_free_rsrc_data = SF_FALSE ; } ; /* Read in the whole lot. */ psf_fread (rsrc.rsrc_data, rsrc.rsrc_len, 1, psf) ; /* Reset the header storage because we have changed to the rsrcdes. 
*/ psf->headindex = psf->headend = rsrc.rsrc_len ; rsrc.data_offset = read_rsrc_int (&rsrc, 0) ; rsrc.map_offset = read_rsrc_int (&rsrc, 4) ; rsrc.data_length = read_rsrc_int (&rsrc, 8) ; rsrc.map_length = read_rsrc_int (&rsrc, 12) ; if (rsrc.data_offset == 0x51607 && rsrc.map_offset == 0x20000) { psf_log_printf (psf, "Trying offset of 0x52 bytes.\n") ; rsrc.data_offset = read_rsrc_int (&rsrc, 0x52 + 0) + 0x52 ; rsrc.map_offset = read_rsrc_int (&rsrc, 0x52 + 4) + 0x52 ; rsrc.data_length = read_rsrc_int (&rsrc, 0x52 + 8) ; rsrc.map_length = read_rsrc_int (&rsrc, 0x52 + 12) ; } ; psf_log_printf (psf, " data offset : 0x%04X\n map offset : 0x%04X\n" " data length : 0x%04X\n map length : 0x%04X\n", rsrc.data_offset, rsrc.map_offset, rsrc.data_length, rsrc.map_length) ; if (rsrc.data_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.data_offset (%d, 0x%x) > len\n", rsrc.data_offset, rsrc.data_offset) ; error = SFE_SD2_BAD_DATA_OFFSET ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.map_offset > len\n") ; error = SFE_SD2_BAD_MAP_OFFSET ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.data_length > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.data_length > len\n") ; error = SFE_SD2_BAD_DATA_LENGTH ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_length > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.map_length > len\n") ; error = SFE_SD2_BAD_MAP_LENGTH ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.data_offset + rsrc.data_length != rsrc.map_offset || rsrc.map_offset + rsrc.map_length != rsrc.rsrc_len) { psf_log_printf (psf, "Error : This does not look like a MacOSX resource fork.\n") ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_offset + 28 >= rsrc.rsrc_len) { psf_log_printf (psf, "Bad map offset (%d + 28 > %d).\n", rsrc.map_offset, rsrc.rsrc_len) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.string_offset = rsrc.map_offset + read_rsrc_short (&rsrc, rsrc.map_offset + 26) ; if (rsrc.string_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Bad string offset (%d).\n", rsrc.string_offset) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.type_offset = rsrc.map_offset + 30 ; if (rsrc.map_offset + 28 > rsrc.rsrc_len) { psf_log_printf (psf, "Bad map offset.\n") ; goto parse_rsrc_fork_cleanup ; } ; rsrc.type_count = read_rsrc_short (&rsrc, rsrc.map_offset + 28) + 1 ; if (rsrc.type_count < 1) { psf_log_printf (psf, "Bad type count.\n") ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.item_offset = rsrc.type_offset + rsrc.type_count * 8 ; if (rsrc.item_offset < 0 || rsrc.item_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Bad item offset (%d).\n", rsrc.item_offset) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.str_index = -1 ; for (k = 0 ; k < rsrc.type_count ; k ++) { if (rsrc.type_offset + k * 8 > rsrc.rsrc_len) { psf_log_printf (psf, "Bad rsrc marker.\n") ; goto parse_rsrc_fork_cleanup ; } ; marker = read_rsrc_marker (&rsrc, rsrc.type_offset + k * 8) ; if (marker == STR_MARKER) { rsrc.str_index = k ; rsrc.str_count = read_rsrc_short (&rsrc, rsrc.type_offset + k * 8 + 4) + 1 ; error = parse_str_rsrc (psf, &rsrc) ; goto parse_rsrc_fork_cleanup ; } ; } ; psf_log_printf (psf, "No 'STR ' resource.\n") ; error = SFE_SD2_BAD_RSRC ; parse_rsrc_fork_cleanup : psf_use_rsrc (psf, SF_FALSE) ; if (rsrc.need_to_free_rsrc_data) free (rsrc.rsrc_data) ; return error ; } /* sd2_parse_rsrc_fork */ static int parse_str_rsrc 
(SF_PRIVATE *psf, SD2_RSRC * rsrc) { char name [32], value [32] ; int k, str_offset, rsrc_id, data_offset = 0, data_len = 0 ; psf_log_printf (psf, "Finding parameters :\n") ; str_offset = rsrc->string_offset ; psf_log_printf (psf, " Offset RsrcId dlen slen Value\n") ; for (k = 0 ; data_offset + data_len < rsrc->rsrc_len ; k++) { int slen ; slen = read_rsrc_char (rsrc, str_offset) ; read_rsrc_str (rsrc, str_offset + 1, name, SF_MIN (SIGNED_SIZEOF (name), slen + 1)) ; str_offset += slen + 1 ; rsrc_id = read_rsrc_short (rsrc, rsrc->item_offset + k * 12) ; data_offset = rsrc->data_offset + read_rsrc_int (rsrc, rsrc->item_offset + k * 12 + 4) ; if (data_offset < 0 || data_offset > rsrc->rsrc_len) { psf_log_printf (psf, "Exiting parser on data offset of %d.\n", data_offset) ; break ; } ; data_len = read_rsrc_int (rsrc, data_offset) ; if (data_len < 0 || data_len > rsrc->rsrc_len) { psf_log_printf (psf, "Exiting parser on data length of %d.\n", data_len) ; break ; } ; slen = read_rsrc_char (rsrc, data_offset + 4) ; read_rsrc_str (rsrc, data_offset + 5, value, SF_MIN (SIGNED_SIZEOF (value), slen + 1)) ; psf_log_printf (psf, " 0x%04x %4d %4d %3d '%s'\n", data_offset, rsrc_id, data_len, slen, value) ; if (rsrc_id == 1000 && rsrc->sample_size == 0) rsrc->sample_size = strtol (value, NULL, 10) ; else if (rsrc_id == 1001 && rsrc->sample_rate == 0) rsrc->sample_rate = strtol (value, NULL, 10) ; else if (rsrc_id == 1002 && rsrc->channels == 0) rsrc->channels = strtol (value, NULL, 10) ; } ; psf_log_printf (psf, "Found Parameters :\n") ; psf_log_printf (psf, " sample-size : %d\n", rsrc->sample_size) ; psf_log_printf (psf, " sample-rate : %d\n", rsrc->sample_rate) ; psf_log_printf (psf, " channels : %d\n", rsrc->channels) ; if (rsrc->sample_rate <= 4 && rsrc->sample_size > 4) { int temp ; psf_log_printf (psf, "Geez!! Looks like sample rate and sample size got switched.\nCorrecting this screw up.\n") ; temp = rsrc->sample_rate ; rsrc->sample_rate = rsrc->sample_size ; rsrc->sample_size = temp ; } ; if (rsrc->sample_rate < 0) { psf_log_printf (psf, "Bad sample rate (%d)\n", rsrc->sample_rate) ; return SFE_SD2_BAD_RSRC ; } ; if (rsrc->channels < 0) { psf_log_printf (psf, "Bad channel count (%d)\n", rsrc->channels) ; return SFE_SD2_BAD_RSRC ; } ; psf->sf.samplerate = rsrc->sample_rate ; psf->sf.channels = rsrc->channels ; psf->bytewidth = rsrc->sample_size ; switch (rsrc->sample_size) { case 1 : psf->sf.format = SF_FORMAT_SD2 | SF_FORMAT_PCM_S8 ; break ; case 2 : psf->sf.format = SF_FORMAT_SD2 | SF_FORMAT_PCM_16 ; break ; case 3 : psf->sf.format = SF_FORMAT_SD2 | SF_FORMAT_PCM_24 ; break ; case 4 : psf->sf.format = SF_FORMAT_SD2 | SF_FORMAT_PCM_32 ; break ; default : psf_log_printf (psf, "Bad sample size (%d)\n", rsrc->sample_size) ; return SFE_SD2_BAD_SAMPLE_SIZE ; } ; psf_log_printf (psf, "ok\n") ; return 0 ; } /* parse_str_rsrc */
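/*
** A small illustrative sketch, not part of the original file: the resource
** fork read above stores integers big-endian and strings as Pascal strings
** (a length byte followed by the characters), which is what the
** read_rsrc_short/int helpers and the slen handling in parse_str_rsrc()
** decode. The function name demo_rsrc_primitives, the guard macro and the
** byte values are hypothetical.
*/

#ifdef SD2_RSRC_SKETCH
#include <stdio.h>

static void
demo_rsrc_primitives (void)
{	/* Four big-endian bytes followed by the Pascal string "16". */
	const unsigned char data [] = { 0x12, 0x34, 0xAB, 0xCD, 2, '1', '6' } ;

	int be_int = (((int) data [0]) << 24) + (data [1] << 16) + (data [2] << 8) + data [3] ;
	int slen = data [4] ;	/* Pascal length byte, like 'slen' in parse_str_rsrc (). */

	printf ("int : 0x%08X str : '%.*s'\n", be_int, slen, (const char *) data + 5) ;
	/* Prints: int : 0x1234ABCD str : '16' */
} /* demo_rsrc_primitives */
#endif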
/* ==== end of source file (dataset path: ./CrossVul/dataset_final_sorted/CWE-119/c/good_2384_0) ==== */

/* ==== dataset record: crossvul-cpp_data_good_929_0 ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImage method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ typedef struct _PixelChannels { double channel[CompositePixelChannel]; } PixelChannels; static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels) { register ssize_t i; assert(pixels != (PixelChannels **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (PixelChannels *) NULL) pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]); pixels=(PixelChannels **) RelinquishMagickMemory(pixels); return(pixels); } static PixelChannels **AcquirePixelThreadSet(const Image *images) { const Image *next; PixelChannels **pixels; register ssize_t i; size_t columns, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(PixelChannels **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (PixelChannels **) NULL) return((PixelChannels **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); columns=images->columns; for (next=images; next != (Image *) NULL; next=next->next) columns=MagickMax(next->columns,columns); for (i=0; i < (ssize_t) number_threads; i++) { register ssize_t j; pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels)); if (pixels[i] == (PixelChannels *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) columns; j++) { register ssize_t k; for (k=0; k < MaxPixelChannels; k++) pixels[i][j].channel[k]=0.0; } } return(pixels); } static inline double EvaluateMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const PixelChannels *color_1, *color_2; double distance; register ssize_t i; color_1=(const PixelChannels *) x; color_2=(const PixelChannels *) y; distance=0.0; for (i=0; i < MaxPixelChannels; i++) distance+=color_1->channel[i]-(double) color_2->channel[i]; return(distance < 0 ? -1 : distance > 0 ? 1 : 0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel, const MagickEvaluateOperator op,const double value) { double result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(double) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(double) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() that returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). */ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(double) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 
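/* a zero divisor is treated as 1 to avoid division by zero */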
1.0 : value); break; } case ExponentialEvaluateOperator: { result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise, value); break; } case LaplacianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(double) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+ 1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(double) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(double) (pixel+value); break; } case MedianEvaluateOperator: { result=(double) (pixel+value); break; } case MinEvaluateOperator: { result=(double) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(double) (value*pixel); break; } case OrEvaluateOperator: { result=(double) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise, value); break; } case PowEvaluateOperator: { result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double) value)); break; } case RightShiftEvaluateOperator: { result=(double) ((size_t) pixel >> (size_t) (value+0.5)); break; } case RootMeanSquareEvaluateOperator: { result=(double) (pixel*pixel+value); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(double) (pixel-value); break; } case SumEvaluateOperator: { result=(double) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(double) (((double) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise, value); break; } case XorEvaluateOperator: { result=(double) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception) { const Image *p, *q; size_t columns, rows; q=images; columns=images->columns; rows=images->rows; for (p=images; p != (Image *) NULL; p=p->next) { if (p->number_channels > q->number_channels) q=p; if (p->columns > columns) columns=p->columns; if (p->rows > rows) rows=p->rows; } return(CloneImage(q,columns,rows,MagickTrue,exception)); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict evaluate_pixels; RandomInfo **magick_restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images); if (evaluate_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
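    The median operator takes its own branch below: for every pixel column
    the channel values of all input images are collected, sorted with
    IntensityCompare(), and the middle entry is written out. All other
    operators accumulate row by row into evaluate_pixel[] (the first image
    is simply added) and mean, multiply and root-mean-square results are
    normalized before the output pixels are clamped.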
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); evaluate_view=AcquireAuthenticCacheView(image,exception); if (op == MedianEvaluateOperator) { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j, k; for (j=0; j < (ssize_t) number_images; j++) for (k=0; k < MaxPixelChannels; k++) evaluate_pixel[j].channel[k]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; register ssize_t i; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); PixelTrait traits = GetPixelChannelTraits(next,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[j].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),op, evaluate_pixel[j].channel[i]); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); for (k=0; k < (ssize_t) GetPixelChannels(image); k++) q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,EvaluateImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } else { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) evaluate_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); if (p == (const Quantum *) NULL) { 
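/* The virtual pixel fetch failed: release this image's cache view and
   abandon the remaining input images for this row. */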
image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[x].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),j == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].channel[i]); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; switch (op) { case MeanEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]/=(double) number_images; break; } case MultiplyEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) evaluate_pixel[x].channel[i]*=QuantumScale; } break; } case RootMeanSquareEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/ number_images); break; } default: break; } } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,EvaluateImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = 
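/* per-thread index used to select this thread's RandomInfo instance */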
GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double result; register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & CopyPixelTrait) != 0) continue; if ((traits & UpdatePixelTrait) == 0) continue; result=ApplyEvaluateOperator(random_info[id],q[i],op,value); if (op == MeanEvaluateOperator) result/=2.0; q[i]=ClampToQuantum(result); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. % % The format of the FunctionImage method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o function: A channel function. % % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { double result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+ c1*x^2+c2*x+c3). */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result=result*QuantumScale*pixel+parameters[i]; result*=QuantumRange; break; } case SinusoidFunction: { double amplitude, bias, frequency, phase; /* Sinusoid: frequency, phase, amplitude, bias. */ frequency=(number_parameters >= 1) ? parameters[0] : 1.0; phase=(number_parameters >= 2) ? parameters[1] : 0.0; amplitude=(number_parameters >= 3) ? parameters[2] : 0.5; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=(double) (QuantumRange*(amplitude*sin((double) (2.0* MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias)); break; } case ArcsinFunction: { double bias, center, range, width; /* Arcsin (peged at range limits for invalid results): width, center, range, and bias. */ width=(number_parameters >= 1) ? parameters[0] : 1.0; center=(number_parameters >= 2) ? parameters[1] : 0.5; range=(number_parameters >= 3) ? parameters[2] : 1.0; bias=(number_parameters >= 4) ? 
parameters[3] : 0.5; result=2.0/width*(QuantumScale*pixel-center); if ( result <= -1.0 ) result=bias-range/2.0; else if (result >= 1.0) result=bias+range/2.0; else result=(double) (range/MagickPI*asin((double) result)+bias); result*=QuantumRange; break; } case ArctanFunction: { double center, bias, range, slope; /* Arctan: slope, center, range, and bias. */ slope=(number_parameters >= 1) ? parameters[0] : 1.0; center=(number_parameters >= 2) ? parameters[1] : 0.5; range=(number_parameters >= 3) ? parameters[2] : 1.0; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=(double) (MagickPI*slope*(QuantumScale*pixel-center)); result=(double) (QuantumRange*(range/MagickPI*atan((double) result)+bias)); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateFunctionImage(image,function,number_parameters,parameters, exception) != MagickFalse) return(MagickTrue); #endif if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyFunction(q[i],function,number_parameters,parameters, exception); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E n t r o p y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageEntropy() returns the entropy of one or more image channels. % % The format of the GetImageEntropy method is: % % MagickBooleanType GetImageEntropy(const Image *image,double *entropy, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o entropy: the average entropy of the selected channels. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageEntropy(const Image *image, double *entropy,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *entropy=channel_statistics[CompositePixelChannel].entropy; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtrema() returns the extrema of one or more image channels. % % The format of the GetImageExtrema method is: % % MagickBooleanType GetImageExtrema(const Image *image,size_t *minima, % size_t *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageRange(image,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageKurtosis() returns the kurtosis and skewness of one or more image % channels. % % The format of the GetImageKurtosis method is: % % MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis, % double *skewness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *kurtosis=channel_statistics[CompositePixelChannel].kurtosis; *skewness=channel_statistics[CompositePixelChannel].skewness; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMean() returns the mean and standard deviation of one or more image % channels. % % The format of the GetImageMean method is: % % MagickBooleanType GetImageMean(const Image *image,double *mean, % double *standard_deviation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *mean=channel_statistics[CompositePixelChannel].mean; *standard_deviation= channel_statistics[CompositePixelChannel].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M o m e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMoments() returns the normalized moments of one or more image % channels. % % The format of the GetImageMoments method is: % % ChannelMoments *GetImageMoments(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channels++; } return((size_t) (channels == 0 ? 
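/* never return zero: callers divide the composite channel sums by this */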
1 : channels)); } MagickExport ChannelMoments *GetImageMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 CacheView *image_view; ChannelMoments *channel_moments; double M00[MaxPixelChannels+1], M01[MaxPixelChannels+1], M02[MaxPixelChannels+1], M03[MaxPixelChannels+1], M10[MaxPixelChannels+1], M11[MaxPixelChannels+1], M12[MaxPixelChannels+1], M20[MaxPixelChannels+1], M21[MaxPixelChannels+1], M22[MaxPixelChannels+1], M30[MaxPixelChannels+1]; PointInfo centroid[MaxPixelChannels+1]; ssize_t channel, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) memset(channel_moments,0,(MaxPixelChannels+1)* sizeof(*channel_moments)); (void) memset(centroid,0,sizeof(centroid)); (void) memset(M00,0,sizeof(M00)); (void) memset(M01,0,sizeof(M01)); (void) memset(M02,0,sizeof(M02)); (void) memset(M03,0,sizeof(M03)); (void) memset(M10,0,sizeof(M10)); (void) memset(M11,0,sizeof(M11)); (void) memset(M12,0,sizeof(M12)); (void) memset(M20,0,sizeof(M20)); (void) memset(M21,0,sizeof(M21)); (void) memset(M22,0,sizeof(M22)); (void) memset(M30,0,sizeof(M30)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute center of mass (centroid). */ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M00[channel]+=QuantumScale*p[i]; M00[MaxPixelChannels]+=QuantumScale*p[i]; M10[channel]+=x*QuantumScale*p[i]; M10[MaxPixelChannels]+=x*QuantumScale*p[i]; M01[channel]+=y*QuantumScale*p[i]; M01[MaxPixelChannels]+=y*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute center of mass (centroid). */ if (M00[channel] < MagickEpsilon) { M00[channel]+=MagickEpsilon; centroid[channel].x=(double) image->columns/2.0; centroid[channel].y=(double) image->rows/2.0; continue; } M00[channel]+=MagickEpsilon; centroid[channel].x=M10[channel]/M00[channel]; centroid[channel].y=M01[channel]/M00[channel]; } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute the image moments. 
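      This second pass accumulates the central moments
      Mpq = sum((x-cx)^p*(y-cy)^q*I(x,y)) about the centroid (cx,cy) found in
      the first pass, where I(x,y) is the QuantumScale-normalized channel
      intensity.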
*/ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } M00[MaxPixelChannels]/=GetImageChannels(image); M01[MaxPixelChannels]/=GetImageChannels(image); M02[MaxPixelChannels]/=GetImageChannels(image); M03[MaxPixelChannels]/=GetImageChannels(image); M10[MaxPixelChannels]/=GetImageChannels(image); M11[MaxPixelChannels]/=GetImageChannels(image); M12[MaxPixelChannels]/=GetImageChannels(image); M20[MaxPixelChannels]/=GetImageChannels(image); M21[MaxPixelChannels]/=GetImageChannels(image); M22[MaxPixelChannels]/=GetImageChannels(image); M30[MaxPixelChannels]/=GetImageChannels(image); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute elliptical angle, major and minor axes, eccentricity, & intensity. 
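      The axis lengths derive from the eigenvalues of the second-order central
      moment (covariance) matrix,
      sqrt((2/M00)*((M20+M02) +/- sqrt(4*M11^2+(M20-M02)^2))), and the
      orientation from 0.5*atan(2*M11/(M20-M02)); the conditionals below
      disambiguate the quadrant from the signs of M11 and M20-M02.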
*/ channel_moments[channel].centroid=centroid[channel]; channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0* M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon))); if (fabs(M11[channel]) < MagickEpsilon) { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=0.0; } else if (M11[channel] < 0.0) { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=180.0; } else { if (fabs(M20[channel]-M02[channel]) < MagickEpsilon) channel_moments[channel].ellipse_angle+=0.0; else if ((M20[channel]-M02[channel]) < 0.0) channel_moments[channel].ellipse_angle+=90.0; else channel_moments[channel].ellipse_angle+=0.0; } channel_moments[channel].ellipse_eccentricity=sqrt(1.0-( channel_moments[channel].ellipse_axis.y/ (channel_moments[channel].ellipse_axis.x+MagickEpsilon))); channel_moments[channel].ellipse_intensity=M00[channel]/ (MagickPI*channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.y+MagickEpsilon); } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Normalize image moments. */ M10[channel]=0.0; M01[channel]=0.0; M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0); M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0); M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0); M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0); M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0); M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0); M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0); M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0); M00[channel]=1.0; } image_view=DestroyCacheView(image_view); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute Hu invariant moments. 
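      These are the seven Hu moment invariants, which are unchanged under
      translation, scaling, and rotation, plus an eighth skew invariant
      (often credited to Flusser) stored in invariant[7].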
*/ channel_moments[channel].invariant[0]=M20[channel]+M02[channel]; channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+ M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImagePerceptualHash method is: % % ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
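%
%  For example, a usage sketch (illustrative; the variable name is not part
%  of this source):
%
%      ChannelPerceptualHash *phash;
%
%      phash=GetImagePerceptualHash(image,exception);
%      ... use phash[RedPixelChannel].phash[0][0], etc. ...
%      phash=(ChannelPerceptualHash *) RelinquishMagickMemory(phash);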
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, ExceptionInfo *exception) { ChannelPerceptualHash *perceptual_hash; char *colorspaces, *q; const char *artifact; MagickBooleanType status; register char *p; register ssize_t i; perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( MaxPixelChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); artifact=GetImageArtifact(image,"phash:colorspaces"); if (artifact != NULL) colorspaces=AcquireString(artifact); else colorspaces=AcquireString("sRGB,HCLp"); perceptual_hash[0].number_colorspaces=0; perceptual_hash[0].number_channels=0; q=colorspaces; for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++) { ChannelMoments *moments; Image *hash_image; size_t j; ssize_t channel, colorspace; if (i >= MaximumNumberOfPerceptualColorspaces) break; colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p); if (colorspace < 0) break; perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace; hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) break; hash_image->depth=8; status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace, exception); if (status == MagickFalse) break; moments=GetImageMoments(hash_image,exception); perceptual_hash[0].number_colorspaces++; perceptual_hash[0].number_channels+=GetImageChannels(hash_image); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) break; for (channel=0; channel <= MaxPixelChannels; channel++) for (j=0; j < MaximumNumberOfImageMoments; j++) perceptual_hash[channel].phash[i][j]= (-MagickLog10(moments[channel].invariant[j])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); } colorspaces=DestroyString(colorspaces); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageRange() returns the range of one or more image channels. % % The format of the GetImageRange method is: % % MagickBooleanType GetImageRange(const Image *image,double *minima, % double *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
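%
%  For example (illustrative):
%
%      double minima, maxima;
%
%      (void) GetImageRange(image,&minima,&maxima,exception);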
% */ MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima, double *maxima,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType initialize, status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; initialize=MagickTrue; *maxima=0.0; *minima=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status,initialize) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double row_maxima = 0.0, row_minima = 0.0; MagickBooleanType row_initialize; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } row_initialize=MagickTrue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (row_initialize != MagickFalse) { row_minima=(double) p[i]; row_maxima=(double) p[i]; row_initialize=MagickFalse; } else { if ((double) p[i] < row_minima) row_minima=(double) p[i]; if ((double) p[i] > row_maxima) row_maxima=(double) p[i]; } } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageRange) #endif { if (initialize != MagickFalse) { *minima=row_minima; *maxima=row_maxima; initialize=MagickFalse; } else { if (row_minima < *minima) *minima=row_minima; if (row_maxima > *maxima) *maxima=row_maxima; } } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageStatistics() returns statistics for each channel in the image. The % statistics include the channel depth, its minima, maxima, mean, standard % deviation, kurtosis and skewness. You can access the red channel mean, for % example, like this: % % channel_statistics=GetImageStatistics(image,exception); % red_mean=channel_statistics[RedPixelChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageStatistics method is: % % ChannelStatistics *GetImageStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ChannelStatistics *GetImageStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area, *histogram, standard_deviation; MagickStatusType status; QuantumAny range; register ssize_t i; size_t depth; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); channel_statistics=(ChannelStatistics *) AcquireQuantumMemory( MaxPixelChannels+1,sizeof(*channel_statistics)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (double *) NULL)) { if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) memset(channel_statistics,0,(MaxPixelChannels+1)* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute pixel statistics. */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[channel].depth; range=GetQuantumRange(depth); status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range), range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[channel].depth++; i--; continue; } } if ((double) p[i] < channel_statistics[channel].minima) channel_statistics[channel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[channel].maxima) channel_statistics[channel].maxima=(double) p[i]; channel_statistics[channel].sum+=p[i]; channel_statistics[channel].sum_squared+=(double) p[i]*p[i]; channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]* p[i]; channel_statistics[channel].area++; if ((double) p[i] < channel_statistics[CompositePixelChannel].minima) channel_statistics[CompositePixelChannel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima) channel_statistics[CompositePixelChannel].maxima=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum((double) p[i]))+i]++; channel_statistics[CompositePixelChannel].sum+=(double) p[i]; channel_statistics[CompositePixelChannel].sum_squared+=(double) p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].area++; } p+=GetPixelChannels(image); } } for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Normalize pixel statistics. */ area=PerceptibleReciprocal(channel_statistics[i].area); channel_statistics[i].sum*=area; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area- 1.0)*channel_statistics[i].area*standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double number_bins; register ssize_t j; /* Compute pixel entropy. */ PixelChannel channel = GetPixelChannelChannel(image,i); number_bins=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) if (histogram[GetPixelChannels(image)*j+i] > 0.0) number_bins++; area=PerceptibleReciprocal(channel_statistics[channel].area); for (j=0; j <= (ssize_t) MaxMap; j++) { double count; count=area*histogram[GetPixelChannels(image)*j+i]; channel_statistics[channel].entropy+=-count*MagickLog10(count)* PerceptibleReciprocal(MagickLog10(number_bins)); channel_statistics[CompositePixelChannel].entropy+=-count* MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/ GetPixelChannels(image); } } histogram=(double *) RelinquishMagickMemory(histogram); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Compute kurtosis & skewness statistics. 
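      With mu the mean and the sums already normalized to expectations above,
      skewness = (E[x^3]-3*mu*E[x^2]+2*mu^3)/sigma^3 and
      kurtosis = (E[x^4]-4*mu*E[x^3]+6*mu^2*E[x^2]-3*mu^4)/sigma^4-3.0,
      i.e. excess kurtosis, so a Gaussian channel scores 0.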
*/ standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } channel_statistics[CompositePixelChannel].mean=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; channel_statistics[CompositePixelChannel].entropy=0.0; for (i=0; i < (ssize_t) MaxPixelChannels; i++) { channel_statistics[CompositePixelChannel].mean+= channel_statistics[i].mean; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].standard_deviation; channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; } channel_statistics[CompositePixelChannel].mean/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].standard_deviation/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].entropy/=(double) GetImageChannels(image); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. 
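%
%  For example, to average two images (an illustrative sketch; the trailing
%  0.0 is the documented constant term):
%
%      const double terms[] = { 0.5, 1.0, 0.5, 1.0, 0.0 };
%
%      average=PolynomialImage(images,2,terms,exception);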
% */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict polynomial_pixels; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *polynomial_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } polynomial_pixel=polynomial_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) polynomial_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; if (j >= (ssize_t) number_terms) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { MagickRealType coefficient, degree; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (polynomial_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; coefficient=(MagickRealType) terms[2*j]; degree=(MagickRealType) terms[(j << 1)+1]; polynomial_pixel[x].channel[i]+=coefficient* pow(QuantumScale*GetPixelChannel(image,channel,p),degree); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; 
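        /*
          Scale the accumulated polynomial sum from normalized pixel space
          back to the quantum range, clamping out-of-range results.
        */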
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,PolynomialImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ typedef struct _SkipNode { size_t next[9], count, signature; } SkipNode; typedef struct _SkipList { ssize_t level; SkipNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed; SkipList skip_list; size_t signature; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); if (pixel_list->skip_list.nodes != (SkipNode *) NULL) pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory( pixel_list->skip_list.nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) memset((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL, sizeof(*pixel_list->skip_list.nodes)); if (pixel_list->skip_list.nodes == (SkipNode *) NULL) return(DestroyPixelList(pixel_list)); (void) memset(pixel_list->skip_list.nodes,0,65537UL* sizeof(*pixel_list->skip_list.nodes)); pixel_list->signature=MagickCoreSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) 
memset(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_list[i]=AcquirePixelList(width,height);
    if (pixel_list[i] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}

static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}

static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    maximum;

  ssize_t
    count;

  /*
    Find the maximum value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  maximum=p->nodes[color].next[0];
  do
  {
    color=p->nodes[color].next[0];
    if (color > maximum)
      maximum=color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) maximum);
}

static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the mean value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sum);
}

static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the median value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) color);
}

static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    minimum;

  ssize_t
    count;

  /*
    Find the minimum value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  count=0;
  color=65536UL;
  minimum=p->nodes[color].next[0];
  do
  {
    color=p->nodes[color].next[0];
    if (color < minimum)
      minimum=color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) minimum);
}

static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
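    The mode is the histogram bin with the largest count, found by walking
    the level-0 chain of the skip-list.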
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  mode=color;
  max_count=p->nodes[mode].count;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    if (p->nodes[color].count > max_count)
      {
        mode=color;
        max_count=p->nodes[mode].count;
      }
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) mode);
}

static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Find the non-peak value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}

static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the root mean square value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) (p->nodes[color].count*color*color);
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}

static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the standard-deviation value for each of the colors.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    register ssize_t
      i;

    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    for (i=0; i < (ssize_t) p->nodes[color].count; i++)
      sum_squared+=((double) color)*((double) color);
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}

static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  index=ScaleQuantumToShort(pixel);
  signature=pixel_list->skip_list.nodes[index].signature;
  if (signature == pixel_list->signature)
    {
      pixel_list->skip_list.nodes[index].count++;
      return;
    }
  AddNodePixelList(pixel_list,index);
}

static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register SkipNode
    *root;

  register SkipList
    *p;

  /*
    Reset the skip-list.
  */
  p=(&pixel_list->skip_list);
  root=p->nodes+65536UL;
  p->level=0;
  for (level=0; level < 9; level++)
    root->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}

MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag  "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
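    Each worker thread receives a private PixelList: a skip-list histogram
    over the 65536 possible 16-bit sample values (plus a sentinel node) into
    which the width x height neighborhood is inserted for every channel.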
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); statistic_image=CloneImage(image,0,0,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(statistic_image,DirectClass,exception); if (status == MagickFalse) { statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1)); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. */ center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))* (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y- (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1), MagickMax(height,1),exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) statistic_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { Quantum pixel; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image, channel); if ((traits == UndefinedPixelTrait) || (statistic_traits == UndefinedPixelTrait)) continue; if (((statistic_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(statistic_image,channel,p[center+i],q); continue; } if ((statistic_traits & UpdatePixelTrait) == 0) continue; pixels=p; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { InsertPixelList(pixels[i],pixel_list[id]); pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } switch (type) { case GradientStatistic: { double maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=(double) pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=(double) pixel; pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum)); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { 
GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case RootMeanSquareStatistic: { GetRootMeanSquarePixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } SetPixelChannel(statistic_image,channel,pixel,q); } p+=GetPixelChannels(image); q+=GetPixelChannels(statistic_image); } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
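/*
  Usage sketch for StatisticImage() (illustrative; the variable name is not
  part of this source):

    Image
      *median_image;

    median_image=StatisticImage(image,MedianStatistic,3,3,exception);

  replaces each pixel with the median of its 3x3 neighborhood.
*/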
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % JJJJJ PPPP EEEEE GGGG % % J P P E G % % J PPPP EEE G GG % % J J P E G G % % JJJ P EEEEE GGG % % % % % % Read/Write JPEG Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This software is based in part on the work of the Independent JPEG Group. % See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and % licensing restrictions. Blob support contributed by Glenn Randers-Pehrson. % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap-private.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/xml-tree.h" #include <setjmp.h> #if defined(MAGICKCORE_JPEG_DELEGATE) #define JPEG_INTERNAL_OPTIONS #if defined(__MINGW32__) || defined(__MINGW64__) # define XMD_H 1 /* Avoid conflicting typedef for INT32 */ #endif #undef HAVE_STDLIB_H #include "jpeglib.h" #include "jerror.h" #endif /* Define declarations. */ #define ICC_MARKER (JPEG_APP0+2) #define ICC_PROFILE "ICC_PROFILE" #define IPTC_MARKER (JPEG_APP0+13) #define XML_MARKER (JPEG_APP0+1) #define MaxBufferExtent 16384 /* Typedef declarations. 
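  The SourceManager and DestinationManager structures below adapt libjpeg's
  buffered I/O callbacks to ImageMagick blob streams, while ErrorManager
  records the image and a setjmp()/longjmp() context so that libjpeg errors
  can unwind back into the coder.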
*/ #if defined(MAGICKCORE_JPEG_DELEGATE) typedef struct _DestinationManager { struct jpeg_destination_mgr manager; Image *image; JOCTET *buffer; } DestinationManager; typedef struct _ErrorManager { Image *image; MagickBooleanType finished; StringInfo *profile; jmp_buf error_recovery; } ErrorManager; typedef struct _SourceManager { struct jpeg_source_mgr manager; Image *image; JOCTET *buffer; boolean start_of_blob; } SourceManager; #endif typedef struct _QuantizationTable { char *slot, *description; size_t width, height; double divisor; unsigned int *levels; } QuantizationTable; /* Forward declarations. */ #if defined(MAGICKCORE_JPEG_DELEGATE) static MagickBooleanType WriteJPEGImage(const ImageInfo *,Image *); #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J P E G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJPEG() returns MagickTrue if the image format type, identified by the % magick string, is JPEG. % % The format of the IsJPEG method is: % % MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) { if (length < 3) return(MagickFalse); if (memcmp(magick,"\377\330\377",3) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJPEGImage() reads a JPEG image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer to % the new image. % % The format of the ReadJPEGImage method is: % % Image *ReadJPEGImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
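%
%  ReadJPEGImage() is static to this coder; applications reach it indirectly
%  through ReadImage() once the JPEG decoder has been registered.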
% */ static boolean FillInputBuffer(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image, MaxBufferExtent,source->buffer); if (source->manager.bytes_in_buffer == 0) { if (source->start_of_blob != FALSE) ERREXIT(cinfo,JERR_INPUT_EMPTY); WARNMS(cinfo,JWRN_JPEG_EOF); source->buffer[0]=(JOCTET) 0xff; source->buffer[1]=(JOCTET) JPEG_EOI; source->manager.bytes_in_buffer=2; } source->manager.next_input_byte=source->buffer; source->start_of_blob=FALSE; return(TRUE); } static int GetCharacter(j_decompress_ptr jpeg_info) { if (jpeg_info->src->bytes_in_buffer == 0) (void) (*jpeg_info->src->fill_input_buffer)(jpeg_info); jpeg_info->src->bytes_in_buffer--; return((int) GETJOCTET(*jpeg_info->src->next_input_byte++)); } static void InitializeSource(j_decompress_ptr cinfo) { SourceManager *source; source=(SourceManager *) cinfo->src; source->start_of_blob=TRUE; } static MagickBooleanType IsITUFaxImage(const Image *image) { const StringInfo *profile; const unsigned char *datum; profile=GetImageProfile(image,"8bim"); if (profile == (const StringInfo *) NULL) return(MagickFalse); if (GetStringInfoLength(profile) < 5) return(MagickFalse); datum=GetStringInfoDatum(profile); if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) && (datum[3] == 0x41) && (datum[4] == 0x58)) return(MagickTrue); return(MagickFalse); } static void JPEGErrorHandler(j_common_ptr jpeg_info) { char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; (jpeg_info->err->format_message)(jpeg_info,message); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); if (error_manager->finished != MagickFalse) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageWarning,(char *) message,"`%s'",image->filename); else (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,(char *) message,"`%s'",image->filename); longjmp(error_manager->error_recovery,1); } static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level) { #define JPEGExcessiveWarnings 1000 char message[JMSG_LENGTH_MAX]; ErrorManager *error_manager; Image *image; *message='\0'; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; if (level < 0) { /* Process warning message. */ (jpeg_info->err->format_message)(jpeg_info,message); if (jpeg_info->err->num_warnings++ > JPEGExcessiveWarnings) JPEGErrorHandler(jpeg_info); ThrowBinaryException(CorruptImageWarning,(char *) message, image->filename); } else if ((image->debug != MagickFalse) && (level >= jpeg_info->err->trace_level)) { /* Process trace message. */ (jpeg_info->err->format_message)(jpeg_info,message); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "[%s] JPEG Trace: \"%s\"",image->filename,message); } return(MagickTrue); } static boolean ReadComment(j_decompress_ptr jpeg_info) { ErrorManager *error_manager; Image *image; register unsigned char *p; register ssize_t i; size_t length; StringInfo *comment; /* Determine length of comment. 
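    The marker's 16-bit big-endian length field counts the two length bytes
    themselves, hence the guard and subtraction of 2 below.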
  */
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  comment=BlobToStringInfo((const void *) NULL,length);
  if (comment == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  /*
    Read comment.
  */
  error_manager->profile=comment;
  p=GetStringInfoDatum(comment);
  for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  *p='\0';
  error_manager->profile=NULL;
  p=GetStringInfoDatum(comment);
  (void) SetImageProperty(image,"comment",(const char *) p);
  comment=DestroyStringInfo(comment);
  return(TRUE);
}

static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[12];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *icc_profile,
    *profile;

  /*
    Read color profile.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  for (i=0; i < 12; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  if (LocaleCompare(magick,ICC_PROFILE) != 0)
    {
      /*
        Not an ICC profile; return.
      */
      for (i=0; i < (ssize_t) (length-12); i++)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  (void) GetCharacter(jpeg_info);  /* id */
  (void) GetCharacter(jpeg_info);  /* markers */
  length-=14;
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  icc_profile=(StringInfo *) GetImageProfile(image,"icc");
  if (icc_profile != (StringInfo *) NULL)
    {
      ConcatenateStringInfo(icc_profile,profile);
      profile=DestroyStringInfo(profile);
    }
  else
    {
      status=SetImageProfile(image,"icc",profile);
      profile=DestroyStringInfo(profile);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(FALSE);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: ICC, %.20g bytes",(double) length);
  return(TRUE);
}

static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[MaxTextExtent];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *iptc_profile,
    *profile;

  /*
    Determine length of binary data stored here.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  /*
    Validate that this was written as a Photoshop resource format slug.
  */
  for (i=0; i < 10; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  magick[10]='\0';
  length-=10;
  if (length <= 10)
    return(TRUE);
  if (LocaleCompare(magick,"Photoshop ") != 0)
    {
      /*
        Not an IPTC profile; return.
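        Only APP13 segments that carry the "Photoshop " resource signature
        hold IPTC data; anything else is drained byte by byte below.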
*/ for (i=0; i < (ssize_t) length; i++) (void) GetCharacter(jpeg_info); return(TRUE); } /* Remove the version number. */ for (i=0; i < 4; i++) (void) GetCharacter(jpeg_info); if (length <= 11) return(TRUE); length-=4; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; iptc_profile=(StringInfo *) GetImageProfile(image,"8bim"); if (iptc_profile != (StringInfo *) NULL) { ConcatenateStringInfo(iptc_profile,profile); profile=DestroyStringInfo(profile); } else { status=SetImageProfile(image,"8bim",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: iptc, %.20g bytes",(double) length); return(TRUE); } static boolean ReadProfile(j_decompress_ptr jpeg_info) { char name[MaxTextExtent]; const StringInfo *previous_profile; ErrorManager *error_manager; Image *image; int marker; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *profile; /* Read generic profile. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); if (length <= 2) return(TRUE); length-=2; marker=jpeg_info->unread_marker-JPEG_APP0; (void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker); error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; if (marker == 1) { p=GetStringInfoDatum(profile); if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0)) (void) CopyMagickString(name,"exif",MaxTextExtent); if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0)) { ssize_t j; /* Extract namespace from XMP profile. 
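          The XMP payload begins with a NUL-terminated namespace URI
          (typically http://ns.adobe.com/xap/1.0/); locate the terminator and
          discard everything through it so only the XML packet remains.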
*/ p=GetStringInfoDatum(profile); for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++) { if (*p == '\0') break; p++; } if (j < (ssize_t) GetStringInfoLength(profile)) (void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1))); (void) CopyMagickString(name,"xmp",MaxTextExtent); } } previous_profile=GetImageProfile(image,name); if (previous_profile != (const StringInfo *) NULL) { size_t length; length=GetStringInfoLength(profile); SetStringInfoLength(profile,GetStringInfoLength(profile)+ GetStringInfoLength(previous_profile)); (void) memmove(GetStringInfoDatum(profile)+ GetStringInfoLength(previous_profile),GetStringInfoDatum(profile), length); (void) memcpy(GetStringInfoDatum(profile), GetStringInfoDatum(previous_profile), GetStringInfoLength(previous_profile)); } status=SetImageProfile(image,name,profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: %s, %.20g bytes",name,(double) length); return(TRUE); } static void SkipInputData(j_decompress_ptr cinfo,long number_bytes) { SourceManager *source; if (number_bytes <= 0) return; source=(SourceManager *) cinfo->src; while (number_bytes > (long) source->manager.bytes_in_buffer) { number_bytes-=(long) source->manager.bytes_in_buffer; (void) FillInputBuffer(cinfo); } source->manager.next_input_byte+=number_bytes; source->manager.bytes_in_buffer-=number_bytes; } static void TerminateSource(j_decompress_ptr cinfo) { (void) cinfo; } static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image) { SourceManager *source; cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager)); source=(SourceManager *) cinfo->src; source->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); source=(SourceManager *) cinfo->src; source->manager.init_source=InitializeSource; source->manager.fill_input_buffer=FillInputBuffer; source->manager.skip_input_data=SkipInputData; source->manager.resync_to_restart=jpeg_resync_to_restart; source->manager.term_source=TerminateSource; source->manager.bytes_in_buffer=0; source->manager.next_input_byte=NULL; source->image=image; } static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info, Image *image) { image->quality=UndefinedCompressionQuality; #if defined(D_PROGRESSIVE_SUPPORTED) if (image->compression == LosslessJPEGCompression) { image->quality=100; (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: 100 (lossless)"); } else #endif { ssize_t j, qvalue, sum; register ssize_t i; /* Determine the JPEG compression quality from the quantization tables. 
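JPEG does not record a quality setting, but the IJG encoder derives its tables by scaling a reference table with the requested quality, so the scaling can be inverted: sum every quantizer coefficient, sample a few fixed table slots (qvalue), then walk the precomputed hash[] and sums[] arrays below, which hold the expected probe and sum values for qualities 1..100. The estimate is logged as "exact" when both probes match an entry and "approximate" otherwise; files written with custom tables may only map to a nearby quality.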
*/ sum=0; for (i=0; i < NUM_QUANT_TBLS; i++) { if (jpeg_info->quant_tbl_ptrs[i] != NULL) for (j=0; j < DCTSIZE2; j++) sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j]; } if ((jpeg_info->quant_tbl_ptrs[0] != NULL) && (jpeg_info->quant_tbl_ptrs[1] != NULL)) { ssize_t hash[101] = { 1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645, 632, 623, 613, 607, 600, 594, 589, 585, 581, 571, 555, 542, 529, 514, 494, 474, 457, 439, 424, 410, 397, 386, 373, 364, 351, 341, 334, 324, 317, 309, 299, 294, 287, 279, 274, 267, 262, 257, 251, 247, 243, 237, 232, 227, 222, 217, 213, 207, 202, 198, 192, 188, 183, 177, 173, 168, 163, 157, 153, 148, 143, 139, 132, 128, 125, 119, 115, 108, 104, 99, 94, 90, 84, 79, 74, 70, 64, 59, 55, 49, 45, 40, 34, 30, 25, 20, 15, 11, 6, 4, 0 }, sums[101] = { 32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104, 27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946, 23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998, 16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702, 12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208, 9928, 9747, 9564, 9369, 9193, 9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347, 7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495, 5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, 3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846, 1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]+ jpeg_info->quant_tbl_ptrs[1]->quantval[0]+ jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? 
"exact" : "approximate"); break; } } else if (jpeg_info->quant_tbl_ptrs[0] != NULL) { ssize_t hash[101] = { 510, 505, 422, 380, 355, 338, 326, 318, 311, 305, 300, 297, 293, 291, 288, 286, 284, 283, 281, 280, 279, 278, 277, 273, 262, 251, 243, 233, 225, 218, 211, 205, 198, 193, 186, 181, 177, 172, 168, 164, 158, 156, 152, 148, 145, 142, 139, 136, 133, 131, 129, 126, 123, 120, 118, 115, 113, 110, 107, 105, 102, 100, 97, 94, 92, 89, 87, 83, 81, 79, 76, 74, 70, 68, 66, 63, 61, 57, 55, 52, 50, 48, 44, 42, 39, 37, 34, 31, 29, 26, 24, 21, 18, 16, 13, 11, 8, 6, 3, 2, 0 }, sums[101] = { 16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859, 12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679, 9368, 9056, 8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125, 5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616, 4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688, 3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952, 2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211, 2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, 1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736, 667, 592, 518, 441, 369, 292, 221, 151, 86, 64, 0 }; qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+ jpeg_info->quant_tbl_ptrs[0]->quantval[53]); for (i=0; i < 100; i++) { if ((qvalue < hash[i]) && (sum < sums[i])) continue; if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50)) image->quality=(size_t) i+1; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) && (sum <= sums[i]) ? "exact" : "approximate"); break; } } } } static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info, Image *image) { char sampling_factor[MaxTextExtent]; switch (jpeg_info->out_color_space) { case JCS_CMYK: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } case JCS_GRAYSCALE: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAYSCALE"); (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor); break; } case JCS_RGB: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB"); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", jpeg_info->out_color_space); (void) FormatLocaleString(sampling_factor,MaxTextExtent, "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor, jpeg_info->comp_info[0].v_samp_factor, jpeg_info->comp_info[1].h_samp_factor, jpeg_info->comp_info[1].v_samp_factor, jpeg_info->comp_info[2].h_samp_factor, jpeg_info->comp_info[2].v_samp_factor, jpeg_info->comp_info[3].h_samp_factor, jpeg_info->comp_info[3].v_samp_factor); break; } } (void) 
SetImageProperty(image,"jpeg:sampling-factor",sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s", sampling_factor); } static Image *ReadJPEGImage(const ImageInfo *image_info, ExceptionInfo *exception) { char value[MaxTextExtent]; const char *option; ErrorManager error_manager; Image *image; IndexPacket index; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType debug, status; MagickSizeType number_pixels; MemoryInfo *memory_info; register ssize_t i; struct jpeg_decompress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; register JSAMPLE *p; size_t units; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); debug=IsEventLogging(); (void) debug; image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize JPEG parameters. */ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
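JFIF APP0 density semantics: the unit code is 0 for no units (pixel aspect ratio only), 1 for dots per inch and 2 for dots per centimeter. A density of 1x1 is the JFIF default, so it is treated here as "no resolution recorded" rather than as a literal 1 DPI.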
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; (void) jpeg_start_decompress(&jpeg_info); image->columns=jpeg_info.output_width; image->rows=jpeg_info.output_height; image->depth=(size_t) jpeg_info.data_precision; switch (jpeg_info.out_color_space) { case JCS_RGB: default: { (void) SetImageColorspace(image,sRGBColorspace); break; } case JCS_GRAYSCALE: { (void) SetImageColorspace(image,GRAYColorspace); break; } case JCS_YCbCr: { (void) SetImageColorspace(image,YCbCrColorspace); break; } case JCS_CMYK: { (void) SetImageColorspace(image,CMYKColorspace); break; } } if (IsITUFaxImage(image) != MagickFalse) { (void) SetImageColorspace(image,LabColorspace); jpeg_info.out_color_space=JCS_YCbCr; } option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0)) { size_t colors; colors=(size_t) GetQuantumRange(image->depth)+1; if (AcquireImageColormap(image,colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } if (image->debug != MagickFalse) { if (image->interlace != NoInterlace) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d", (int) jpeg_info.data_precision); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d", (int) jpeg_info.output_width,(int) jpeg_info.output_height); } JPEGSetImageQuality(&jpeg_info,image); JPEGSetImageSamplingFactor(&jpeg_info,image); (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) jpeg_info.out_color_space); (void) SetImageProperty(image,"jpeg:colorspace",value); if (image_info->ping != MagickFalse) { jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jpeg_destroy_decompress(&jpeg_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components != 1) && (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4)) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(CorruptImageError,"ImageTypeNotSupported"); } memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.output_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); /* Convert JPEG pixels to pixel packets.
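A second setjmp() target is installed first so that a libjpeg error raised while decoding scanlines releases the jpeg_pixels arena before unwinding. Samples wider than 8 bits are stretched to 16 bits with the scale factor and reduced with ScaleShortToQuantum(), 8-bit samples go through ScaleCharToQuantum(), and CMYK values are complemented (QuantumRange minus sample) because Adobe-style JPEGs store inverted ink levels.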
*/ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterJPEGImage() adds properties for the JPEG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterJPEGImage method is: % % size_t RegisterJPEGImage(void) % */ ModuleExport size_t RegisterJPEGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char description[] = "Joint Photographic Experts Group JFIF format"; *version='\0'; #if defined(JPEG_LIB_VERSION) (void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION); #endif entry=SetMagickInfo("JPE"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); 
entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPS"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PJPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterJPEGImage() removes format registrations made by the % JPEG module from the list of supported formats. % % The format of the UnregisterJPEGImage method is: % % UnregisterJPEGImage(void) % */ ModuleExport void UnregisterJPEGImage(void) { (void) UnregisterMagickInfo("PJPEG"); (void) UnregisterMagickInfo("JPS"); (void) UnregisterMagickInfo("JPG"); (void) UnregisterMagickInfo("JPEG"); (void) UnregisterMagickInfo("JPE"); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJPEGImage() writes a JPEG image to a file or blob and returns % MagickTrue on success, MagickFalse otherwise. % % The format of the WriteJPEGImage method is: % % MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image.
% % */ static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table) { assert(table != (QuantizationTable *) NULL); if (table->slot != (char *) NULL) table->slot=DestroyString(table->slot); if (table->description != (char *) NULL) table->description=DestroyString(table->description); if (table->levels != (unsigned int *) NULL) table->levels=(unsigned int *) RelinquishMagickMemory(table->levels); table=(QuantizationTable *) RelinquishMagickMemory(table); return(table); } static boolean EmptyOutputBuffer(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image, MaxBufferExtent,destination->buffer); if (destination->manager.free_in_buffer != MaxBufferExtent) ERREXIT(cinfo,JERR_FILE_WRITE); destination->manager.next_output_byte=destination->buffer; return(TRUE); } static QuantizationTable *GetQuantizationTable(const char *filename, const char *slot,ExceptionInfo *exception) { char *p, *xml; const char *attribute, *content; double value; register ssize_t i; QuantizationTable *table; size_t length; ssize_t j; XMLTreeInfo *description, *levels, *quantization_tables, *table_iterator; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading quantization tables \"%s\" ...",filename); table=(QuantizationTable *) NULL; xml=FileToString(filename,~0UL,exception); if (xml == (char *) NULL) return(table); quantization_tables=NewXMLTree(xml,exception); if (quantization_tables == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } for (table_iterator=GetXMLTreeChild(quantization_tables,"table"); table_iterator != (XMLTreeInfo *) NULL; table_iterator=GetNextXMLTreeTag(table_iterator)) { attribute=GetXMLTreeAttribute(table_iterator,"slot"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; attribute=GetXMLTreeAttribute(table_iterator,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0)) break; } if (table_iterator == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } description=GetXMLTreeChild(table_iterator,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<description>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } levels=GetXMLTreeChild(table_iterator,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<levels>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table)); if (table == (QuantizationTable *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); table->slot=(char *) NULL; table->description=(char *) NULL; table->levels=(unsigned int *) NULL; attribute=GetXMLTreeAttribute(table_iterator,"slot"); if (attribute != (char *) NULL) table->slot=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) table->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels width>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); 
table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->width=StringToUnsignedLong(attribute); if (table->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels width>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->height=StringToUnsignedLong(attribute); if (table->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->divisor=InterpretLocaleValue(attribute,(char **) NULL); if (table->divisor == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent","<levels>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } length=(size_t) table->width*table->height; if (length < 64) length=64; table->levels=(unsigned int *) AcquireQuantumMemory(length, sizeof(*table->levels)); if (table->levels == (unsigned int *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); for (i=0; i < (ssize_t) (table->width*table->height); i++) { table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/ table->divisor+0.5); while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; content=p; } value=InterpretLocaleValue(content,&p); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent","<level> too many values, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } for (j=i; j < 64; j++) table->levels[j]=table->levels[j-1]; quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } static void InitializeDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); destination->manager.next_output_byte=destination->buffer; destination->manager.free_in_buffer=MaxBufferExtent; } static 
void TerminateDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0) { ssize_t count; count=WriteBlob(destination->image,MaxBufferExtent- destination->manager.free_in_buffer,destination->buffer); if (count != (ssize_t) (MaxBufferExtent-destination->manager.free_in_buffer)) ERREXIT(cinfo,JERR_FILE_WRITE); } } static void WriteProfile(j_compress_ptr jpeg_info,Image *image) { const char *name; const StringInfo *profile; MagickBooleanType iptc; register ssize_t i; size_t length, tag_length; StringInfo *custom_profile; /* Save image profile as a APP marker. */ iptc=MagickFalse; custom_profile=AcquireStringInfo(65535L); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { register unsigned char *p; profile=GetImageProfile(image,name); p=GetStringInfoDatum(custom_profile); if (LocaleCompare(name,"EXIF") == 0) for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65533L) { length=MagickMin(GetStringInfoLength(profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile)+i, (unsigned int) length); } if (LocaleCompare(name,"ICC") == 0) { register unsigned char *p; tag_length=strlen(ICC_PROFILE); p=GetStringInfoDatum(custom_profile); (void) CopyMagickMemory(p,ICC_PROFILE,tag_length); p[tag_length]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L) { length=MagickMin(GetStringInfoLength(profile)-i,65519L); p[12]=(unsigned char) ((i/65519L)+1); p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1); (void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i, length); jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+3)); } } if (((LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse)) { size_t roundup; iptc=MagickTrue; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L) { length=MagickMin(GetStringInfoLength(profile)-i,65500L); roundup=(size_t) (length & 0x01); if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0) { (void) memcpy(p,"Photoshop 3.0 ",14); tag_length=14; } else { (void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24); tag_length=26; p[24]=(unsigned char) (length >> 8); p[25]=(unsigned char) (length & 0xff); } p[13]=0x00; (void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length); if (roundup != 0) p[length+tag_length]='\0'; jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+roundup)); } } if (LocaleCompare(name,"XMP") == 0) { StringInfo *xmp_profile; /* Add namespace to XMP profile. 
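This is the inverse of the read-side namespace split: the canonical URI plus a terminating byte is prepended and the packet is emitted in chunks of at most 65533 bytes, the largest payload a marker segment can carry since the 16-bit segment length includes its own two bytes.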
*/ xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ "); if (xmp_profile != (StringInfo *) NULL) { if (profile != (StringInfo *) NULL) ConcatenateStringInfo(xmp_profile,profile); GetStringInfoDatum(xmp_profile)[28]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L) { length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER, GetStringInfoDatum(xmp_profile)+i,(unsigned int) length); } xmp_profile=DestroyStringInfo(xmp_profile); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), "%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile)); name=GetNextImageProfile(image); } custom_profile=DestroyStringInfo(custom_profile); } static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image) { DestinationManager *destination; cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager)); destination=(DestinationManager *) cinfo->dest; destination->manager.init_destination=InitializeDestination; destination->manager.empty_output_buffer=EmptyOutputBuffer; destination->manager.term_destination=TerminateDestination; destination->image=image; } static char **SamplingFactorToList(const char *text) { char **textlist; register char *q; register const char *p; register ssize_t i; if (text == (char *) NULL) return((char **) NULL); /* Convert string to an ASCII list. */ textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS, sizeof(*textlist)); if (textlist == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); p=text; for (i=0; i < (ssize_t) MAX_COMPONENTS; i++) { for (q=(char *) p; *q != '\0'; q++) if (*q == ',') break; textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent, sizeof(*textlist[i])); if (textlist[i] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); (void) CopyMagickString(textlist[i],p,(size_t) (q-p+1)); if (*q == '\r') q++; if (*q == '\0') break; p=q+1; } for (i++; i < (ssize_t) MAX_COMPONENTS; i++) textlist[i]=ConstantString("1x1"); return(textlist); } static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, Image *image) { const char *option, *sampling_factor, *value; ErrorManager error_manager; ExceptionInfo *exception; Image *volatile volatile_image; int colorspace, quality; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType status; MemoryInfo *memory_info; register JSAMPLE *q; register ssize_t i; ssize_t y; struct jpeg_compress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; unsigned short scale; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if ((LocaleCompare(image_info->magick,"JPS") == 0) && (image->next != (Image *) NULL)) image=AppendImages(image,MagickFalse,exception); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); /* Initialize JPEG parameters. 
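As in the reader, fatal libjpeg conditions are routed through JPEGErrorHandler, which longjmp()s back to the setjmp() below; state that must survive the jump, notably the image pointer and the scanline buffer, is declared volatile for that reason.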
*/ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); volatile_image=image; jpeg_info.client_data=(void *) volatile_image; jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; error_manager.image=volatile_image; memory_info=(MemoryInfo *) NULL; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); (void) CloseBlob(volatile_image); return(MagickFalse); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_compress(&jpeg_info); JPEGDestinationManager(&jpeg_info,image); if ((image->columns != (unsigned int) image->columns) || (image->rows != (unsigned int) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); jpeg_info.image_width=(unsigned int) image->columns; jpeg_info.image_height=(unsigned int) image->rows; jpeg_info.input_components=3; jpeg_info.data_precision=8; jpeg_info.in_color_space=JCS_RGB; switch (image->colorspace) { case CMYKColorspace: { jpeg_info.input_components=4; jpeg_info.in_color_space=JCS_CMYK; break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { jpeg_info.in_color_space=JCS_YCbCr; break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { if (image_info->type == TrueColorType) break; jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; break; } default: { (void) TransformImageColorspace(image,sRGBColorspace); if (image_info->type == TrueColorType) break; if (SetImageGray(image,&image->exception) != MagickFalse) { jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; } break; } } jpeg_set_defaults(&jpeg_info); if (jpeg_info.in_color_space == JCS_CMYK) jpeg_set_colorspace(&jpeg_info,JCS_YCCK); if ((jpeg_info.data_precision != 12) && (image->depth <= 8)) jpeg_info.data_precision=8; else jpeg_info.data_precision=BITS_IN_JSAMPLE; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { /* Set image resolution. */ jpeg_info.write_JFIF_header=TRUE; jpeg_info.X_density=(UINT16) image->x_resolution; jpeg_info.Y_density=(UINT16) image->y_resolution; /* Set image resolution units. */ if (image->units == PixelsPerInchResolution) jpeg_info.density_unit=(UINT8) 1; if (image->units == PixelsPerCentimeterResolution) jpeg_info.density_unit=(UINT8) 2; } jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:optimize-coding"); if (option != (const char *) NULL) jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; else { MagickSizeType length; length=(MagickSizeType) jpeg_info.input_components*image->columns* image->rows*sizeof(JSAMPLE); if (length == (MagickSizeType) ((size_t) length)) { /* Perform optimization only if available memory resources permit it. */ status=AcquireMagickResource(MemoryResource,length); RelinquishMagickResource(MemoryResource,length); jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE; } } #if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED) if ((LocaleCompare(image_info->magick,"PJPEG") == 0) || (image_info->interlace != NoInterlace)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); jpeg_simple_progression(&jpeg_info); } else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: non-progressive"); #else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); #endif quality=92; if ((image_info->compression != LosslessJPEGCompression) && (image->quality <= 100)) { if (image->quality != UndefinedCompressionQuality) quality=(int) image->quality; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g", (double) image->quality); } else { #if !defined(C_LOSSLESS_SUPPORTED) quality=100; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100"); #else if (image->quality < 100) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"LosslessToLossyJPEGConversion",image->filename); else { int point_transform, predictor; predictor=image->quality/100; /* range 1-7 */ point_transform=image->quality % 20; /* range 0-15 */ jpeg_simple_lossless(&jpeg_info,predictor,point_transform); if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Compression: lossless"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Predictor: %d",predictor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Point Transform: %d",point_transform); } } #endif } option=GetImageOption(image_info,"jpeg:extent"); if (option != (const char *) NULL) { Image *jpeg_image; ImageInfo *jpeg_info; jpeg_info=CloneImageInfo(image_info); jpeg_info->blob=NULL; jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image != (Image *) NULL) { MagickSizeType extent; size_t maximum, minimum; /* Search for compression quality that does not exceed image extent. */ jpeg_image->quality=0; extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0); (void) DeleteImageOption(jpeg_info,"jpeg:extent"); (void) DeleteImageArtifact(jpeg_image,"jpeg:extent"); maximum=image_info->quality; if (maximum < 2) maximum=101; for (minimum=2; minimum < maximum; ) { (void) AcquireUniqueFilename(jpeg_image->filename); jpeg_image->quality=minimum+(maximum-minimum+1)/2; (void) WriteJPEGImage(jpeg_info,jpeg_image); if (GetBlobSize(jpeg_image) <= extent) minimum=jpeg_image->quality+1; else maximum=jpeg_image->quality-1; (void) RelinquishUniqueFileResource(jpeg_image->filename); } quality=(int) minimum-1; jpeg_image=DestroyImage(jpeg_image); } jpeg_info=DestroyImageInfo(jpeg_info); } jpeg_set_quality(&jpeg_info,quality,TRUE); #if (JPEG_LIB_VERSION >= 70) option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) { GeometryInfo geometry_info; int flags; /* Set quality scaling for luminance and chrominance separately. 
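With libjpeg 7 or later the quality option accepts a rho[,sigma] geometry: rho scales quantization table 0 (luminance) and sigma table 1 (chrominance), so for example "90,70" keeps luma detail while compressing chroma harder.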
*/ flags=ParseGeometry(option,&geometry_info); if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0)) { jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int) (geometry_info.rho+0.5)); jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int) (geometry_info.sigma+0.5)); jpeg_default_qtables(&jpeg_info,TRUE); } } #endif colorspace=jpeg_info.in_color_space; value=GetImageOption(image_info,"jpeg:colorspace"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:colorspace"); if (value != (char *) NULL) colorspace=StringToInteger(value); sampling_factor=(const char *) NULL; if (colorspace == jpeg_info.in_color_space) { value=GetImageOption(image_info,"jpeg:sampling-factor"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor == (const char *) NULL) { if (quality >= 90) for (i=0; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } else { char **factors; GeometryInfo geometry_info; MagickStatusType flags; /* Set sampling factor. */ i=0; factors=SamplingFactorToList(sampling_factor); if (factors != (char **) NULL) { for (i=0; i < MAX_COMPONENTS; i++) { if (factors[i] == (char *) NULL) break; flags=ParseGeometry(factors[i],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho; jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma; factors[i]=(char *) RelinquishMagickMemory(factors[i]); } factors=(char **) RelinquishMagickMemory(factors); } for ( ; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } option=GetImageOption(image_info,"jpeg:q-table"); if (option != (const char *) NULL) { QuantizationTable *table; /* Custom quantization tables. 
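Slots "0" through "3" of the XML table file load into the matching libjpeg quantization-table slots; since each pass re-points components i and above at the slot just loaded, every component ends up on the highest-numbered defined table that does not exceed its component index.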
*/ table=GetQuantizationTable(option,"0",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=0; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=0; jpeg_add_quant_table(&jpeg_info,0,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"1",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=1; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=1; jpeg_add_quant_table(&jpeg_info,1,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"2",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=2; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=2; jpeg_add_quant_table(&jpeg_info,2,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"3",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=3; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=3; jpeg_add_quant_table(&jpeg_info,3,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } } jpeg_start_compress(&jpeg_info,TRUE); if (image->debug != MagickFalse) { if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g", (double) image->depth); if (image->colors != 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: %.20g",(double) image->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: unspecified"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "JPEG data precision: %d",(int) jpeg_info.data_precision); switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); break; } default: break; } switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAY"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor); break; } case sRGBColorspace: case RGBColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image colorspace is RGB"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, 
jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", image->colorspace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } } } /* Write JPEG profiles. */ value=GetImageProperty(image,"comment"); if (value != (char *) NULL) for (i=0; i < (ssize_t) strlen(value); i+=65533L) jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i, (unsigned int) MagickMin((size_t) strlen(value+i),65533L)); if (image->profiles != (void *) NULL) WriteProfile(&jpeg_info,image); /* Convert MIFF to JPEG raster pixels. */ memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.input_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickFalse); } scanline[0]=(JSAMPROW) jpeg_pixels; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (scale == 0) scale=1; if (jpeg_info.data_precision <= 8) { if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p)); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum( GetPixelLuma(image,p))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; 
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelCyan(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelMagenta(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelYellow(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelBlack(indexes+x)))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum( GetPixelLuma(image,p)))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange- GetPixelIndex(indexes+x))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (y == (ssize_t) image->rows) jpeg_finish_compress(&jpeg_info); /* Relinquish resources. */ jpeg_destroy_compress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickTrue); } #endif
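/*
  Editor's note: a minimal, compiled-out sketch of the marker-length
  arithmetic every Read*Profile() routine above relies on: a JPEG marker
  segment starts with a 2-byte big-endian length that counts its own two
  bytes, and text payloads need one extra byte for the trailing NUL (the
  same off-by-one fixed in ReadComment() above).  The helper name and its
  buffer-based interface are illustrative only; the coder itself pulls
  bytes through GetCharacter() instead.
*/
#if 0
#include <stddef.h>
#include <string.h>

/*
  Parse one COM (0xFF 0xFE) segment at p, copying its text into comment.
  Returns the comment length, or -1 if the segment is malformed or the
  destination buffer is too small.
*/
static long ParseCOMSegment(const unsigned char *p,size_t available,
  char *comment,size_t comment_size)
{
  size_t
    length;

  if ((available < 4) || (p[0] != 0xFF) || (p[1] != 0xFE))
    return(-1);
  length=((size_t) p[2] << 8) | (size_t) p[3];  /* includes the length bytes */
  if ((length < 2) || ((length-2) > (available-4)))
    return(-1);
  length-=2;  /* payload only */
  if ((length+1) > comment_size)
    return(-1);  /* reserve room for the trailing NUL */
  (void) memcpy(comment,p+4,length);
  comment[length]='\0';
  return((long) length);
}
#endif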
./CrossVul/dataset_final_sorted/CWE-119/c/bad_4781_0
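The writer above streams one row at a time: a single scanline buffer of JSAMPLEs is filled from the image's quantum pixels and handed to jpeg_write_scanlines(). Below is a minimal standalone sketch of that same libjpeg pattern, not ImageMagick code: write_rgb_jpeg() is an illustrative name, the quantum scaling is dropped, and libjpeg's default error handler (which exits on failure) stands in for the setjmp-based error_manager used above.

#include <stdio.h>
#include <jpeglib.h>

/* Compress a packed 8-bit RGB buffer the way the writer above does:
 * one scanline per jpeg_write_scanlines() call. */
static int write_rgb_jpeg(const char *path, unsigned char *rgb, int width, int height)
{
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;          /* default handler: exits on error */
    FILE *fp = fopen(path, "wb");
    if (fp == NULL)
        return -1;
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_stdio_dest(&cinfo, fp);
    cinfo.image_width = (JDIMENSION) width;
    cinfo.image_height = (JDIMENSION) height;
    cinfo.input_components = 3;          /* R, G, B */
    cinfo.in_color_space = JCS_RGB;
    jpeg_set_defaults(&cinfo);
    jpeg_start_compress(&cinfo, TRUE);
    while (cinfo.next_scanline < cinfo.image_height)
    {
        /* one row of samples, exactly like the jpeg_pixels row above */
        JSAMPROW row = (JSAMPROW) (rgb + (size_t) cinfo.next_scanline * width * 3);
        (void) jpeg_write_scanlines(&cinfo, &row, 1);
    }
    jpeg_finish_compress(&cinfo);
    jpeg_destroy_compress(&cinfo);
    (void) fclose(fp);
    return 0;
}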
crossvul-cpp_data_bad_253_0
/** * @file * Read/parse/write an NNTP config file of subscribed newsgroups * * @authors * Copyright (C) 1998 Brandon Long <blong@fiction.net> * Copyright (C) 1999 Andrej Gritsenko <andrej@lucky.net> * Copyright (C) 2000-2017 Vsevolod Volkov <vvv@mutt.org.ua> * * @copyright * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation, either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @page newsrc Read/parse/write an NNTP config file of subscribed newsgroups * * Read/parse/write an NNTP config file of subscribed newsgroups */ #include "config.h" #include <dirent.h> #include <errno.h> #include <limits.h> #include <stdbool.h> #include <stdio.h> #include <string.h> #include <sys/stat.h> #include <time.h> #include <unistd.h> #include "mutt/mutt.h" #include "conn/conn.h" #include "mutt.h" #include "bcache.h" #include "context.h" #include "format_flags.h" #include "globals.h" #include "header.h" #include "mutt_account.h" #include "mutt_curses.h" #include "mutt_socket.h" #include "mutt_window.h" #include "mx.h" #include "nntp.h" #include "options.h" #include "protos.h" #include "sort.h" #include "url.h" #ifdef USE_HCACHE #include "hcache/hcache.h" #endif struct BodyCache; /** * nntp_data_find - Find NntpData for given newsgroup or add it * @param nserv NNTP server * @param group Newsgroup * @retval ptr NNTP data * @retval NULL Error */ static struct NntpData *nntp_data_find(struct NntpServer *nserv, const char *group) { struct NntpData *nntp_data = mutt_hash_find(nserv->groups_hash, group); if (nntp_data) return nntp_data; size_t len = strlen(group) + 1; /* create NntpData structure and add it to hash */ nntp_data = mutt_mem_calloc(1, sizeof(struct NntpData) + len); nntp_data->group = (char *) nntp_data + sizeof(struct NntpData); mutt_str_strfcpy(nntp_data->group, group, len); nntp_data->nserv = nserv; nntp_data->deleted = true; mutt_hash_insert(nserv->groups_hash, nntp_data->group, nntp_data); /* add NntpData to list */ if (nserv->groups_num >= nserv->groups_max) { nserv->groups_max *= 2; mutt_mem_realloc(&nserv->groups_list, nserv->groups_max * sizeof(nntp_data)); } nserv->groups_list[nserv->groups_num++] = nntp_data; return nntp_data; } /** * nntp_acache_free - Remove all temporary cache files * @param nntp_data NNTP data */ void nntp_acache_free(struct NntpData *nntp_data) { for (int i = 0; i < NNTP_ACACHE_LEN; i++) { if (nntp_data->acache[i].path) { unlink(nntp_data->acache[i].path); FREE(&nntp_data->acache[i].path); } } } /** * nntp_data_free - Free NntpData, used to destroy hash elements * @param data NNTP data */ void nntp_data_free(void *data) { struct NntpData *nntp_data = data; if (!nntp_data) return; nntp_acache_free(nntp_data); mutt_bcache_close(&nntp_data->bcache); FREE(&nntp_data->newsrc_ent); FREE(&nntp_data->desc); FREE(&data); } /** * nntp_hash_destructor - Free our hash table data * @param type Type (UNUSED) * @param obj NNTP data * @param data Data (UNUSED) */ void nntp_hash_destructor(int type, void *obj, intptr_t data) { nntp_data_free(obj); } /** *
nntp_newsrc_close - Unlock and close .newsrc file * @param nserv NNTP server */ void nntp_newsrc_close(struct NntpServer *nserv) { if (!nserv->newsrc_fp) return; mutt_debug(1, "Unlocking %s\n", nserv->newsrc_file); mutt_file_unlock(fileno(nserv->newsrc_fp)); mutt_file_fclose(&nserv->newsrc_fp); } /** * nntp_group_unread_stat - Count number of unread articles using .newsrc data * @param nntp_data NNTP data */ void nntp_group_unread_stat(struct NntpData *nntp_data) { nntp_data->unread = 0; if (nntp_data->last_message == 0 || nntp_data->first_message > nntp_data->last_message) return; nntp_data->unread = nntp_data->last_message - nntp_data->first_message + 1; for (unsigned int i = 0; i < nntp_data->newsrc_len; i++) { anum_t first = nntp_data->newsrc_ent[i].first; if (first < nntp_data->first_message) first = nntp_data->first_message; anum_t last = nntp_data->newsrc_ent[i].last; if (last > nntp_data->last_message) last = nntp_data->last_message; if (first <= last) nntp_data->unread -= last - first + 1; } } /** * nntp_newsrc_parse - Parse .newsrc file * @param nserv NNTP server * @retval 0 Not changed * @retval 1 Parsed * @retval -1 Error */ int nntp_newsrc_parse(struct NntpServer *nserv) { char *line = NULL; struct stat sb; if (nserv->newsrc_fp) { /* if we already have a handle, close it and reopen */ mutt_file_fclose(&nserv->newsrc_fp); } else { /* if file doesn't exist, create it */ nserv->newsrc_fp = mutt_file_fopen(nserv->newsrc_file, "a"); mutt_file_fclose(&nserv->newsrc_fp); } /* open .newsrc */ nserv->newsrc_fp = mutt_file_fopen(nserv->newsrc_file, "r"); if (!nserv->newsrc_fp) { mutt_perror(nserv->newsrc_file); return -1; } /* lock it */ mutt_debug(1, "Locking %s\n", nserv->newsrc_file); if (mutt_file_lock(fileno(nserv->newsrc_fp), 0, 1)) { mutt_file_fclose(&nserv->newsrc_fp); return -1; } if (stat(nserv->newsrc_file, &sb)) { mutt_perror(nserv->newsrc_file); nntp_newsrc_close(nserv); return -1; } if (nserv->size == sb.st_size && nserv->mtime == sb.st_mtime) return 0; nserv->size = sb.st_size; nserv->mtime = sb.st_mtime; nserv->newsrc_modified = true; mutt_debug(1, "Parsing %s\n", nserv->newsrc_file); /* .newsrc has been externally modified or hasn't been loaded yet */ for (unsigned int i = 0; i < nserv->groups_num; i++) { struct NntpData *nntp_data = nserv->groups_list[i]; if (!nntp_data) continue; nntp_data->subscribed = false; nntp_data->newsrc_len = 0; FREE(&nntp_data->newsrc_ent); } line = mutt_mem_malloc(sb.st_size + 1); while (sb.st_size && fgets(line, sb.st_size + 1, nserv->newsrc_fp)) { char *b = NULL, *h = NULL; unsigned int j = 1; bool subs = false; /* find end of newsgroup name */ char *p = strpbrk(line, ":!"); if (!p) continue; /* ":" - subscribed, "!" 
- unsubscribed */ if (*p == ':') subs = true; *p++ = '\0'; /* get newsgroup data */ struct NntpData *nntp_data = nntp_data_find(nserv, line); FREE(&nntp_data->newsrc_ent); /* count number of entries */ b = p; while (*b) if (*b++ == ',') j++; nntp_data->newsrc_ent = mutt_mem_calloc(j, sizeof(struct NewsrcEntry)); nntp_data->subscribed = subs; /* parse entries */ j = 0; while (p) { b = p; /* find end of entry */ p = strchr(p, ','); if (p) *p++ = '\0'; /* first-last or single number */ h = strchr(b, '-'); if (h) *h++ = '\0'; else h = b; if (sscanf(b, ANUM, &nntp_data->newsrc_ent[j].first) == 1 && sscanf(h, ANUM, &nntp_data->newsrc_ent[j].last) == 1) { j++; } } if (j == 0) { nntp_data->newsrc_ent[j].first = 1; nntp_data->newsrc_ent[j].last = 0; j++; } if (nntp_data->last_message == 0) nntp_data->last_message = nntp_data->newsrc_ent[j - 1].last; nntp_data->newsrc_len = j; mutt_mem_realloc(&nntp_data->newsrc_ent, j * sizeof(struct NewsrcEntry)); nntp_group_unread_stat(nntp_data); mutt_debug(2, "%s\n", nntp_data->group); } FREE(&line); return 1; } /** * nntp_newsrc_gen_entries - Generate array of .newsrc entries * @param ctx Mailbox */ void nntp_newsrc_gen_entries(struct Context *ctx) { struct NntpData *nntp_data = ctx->data; anum_t last = 0, first = 1; bool series; int save_sort = SORT_ORDER; unsigned int entries; if (Sort != SORT_ORDER) { save_sort = Sort; Sort = SORT_ORDER; mutt_sort_headers(ctx, 0); } entries = nntp_data->newsrc_len; if (!entries) { entries = 5; nntp_data->newsrc_ent = mutt_mem_calloc(entries, sizeof(struct NewsrcEntry)); } /* Set up to fake initial sequence from 1 to the article before the * first article in our list */ nntp_data->newsrc_len = 0; series = true; for (int i = 0; i < ctx->msgcount; i++) { /* search for first unread */ if (series) { /* We don't actually check sequential order, since we mark * "missing" entries as read/deleted */ last = NHDR(ctx->hdrs[i])->article_num; if (last >= nntp_data->first_message && !ctx->hdrs[i]->deleted && !ctx->hdrs[i]->read) { if (nntp_data->newsrc_len >= entries) { entries *= 2; mutt_mem_realloc(&nntp_data->newsrc_ent, entries * sizeof(struct NewsrcEntry)); } nntp_data->newsrc_ent[nntp_data->newsrc_len].first = first; nntp_data->newsrc_ent[nntp_data->newsrc_len].last = last - 1; nntp_data->newsrc_len++; series = false; } } /* search for first read */ else { if (ctx->hdrs[i]->deleted || ctx->hdrs[i]->read) { first = last + 1; series = true; } last = NHDR(ctx->hdrs[i])->article_num; } } if (series && first <= nntp_data->last_loaded) { if (nntp_data->newsrc_len >= entries) { entries++; mutt_mem_realloc(&nntp_data->newsrc_ent, entries * sizeof(struct NewsrcEntry)); } nntp_data->newsrc_ent[nntp_data->newsrc_len].first = first; nntp_data->newsrc_ent[nntp_data->newsrc_len].last = nntp_data->last_loaded; nntp_data->newsrc_len++; } mutt_mem_realloc(&nntp_data->newsrc_ent, nntp_data->newsrc_len * sizeof(struct NewsrcEntry)); if (save_sort != Sort) { Sort = save_sort; mutt_sort_headers(ctx, 0); } } /** * update_file - Update file with new contents * @param filename File to update * @param buf New contents * @retval 0 Success * @retval -1 Failure */ static int update_file(char *filename, char *buf) { FILE *fp = NULL; char tmpfile[PATH_MAX]; int rc = -1; while (true) { snprintf(tmpfile, sizeof(tmpfile), "%s.tmp", filename); fp = mutt_file_fopen(tmpfile, "w"); if (!fp) { mutt_perror(tmpfile); *tmpfile = '\0'; break; } if (fputs(buf, fp) == EOF) { mutt_perror(tmpfile); break; } if (mutt_file_fclose(&fp) == EOF) { mutt_perror(tmpfile); fp = NULL;
break; } fp = NULL; if (rename(tmpfile, filename) < 0) { mutt_perror(filename); break; } *tmpfile = '\0'; rc = 0; break; } if (fp) mutt_file_fclose(&fp); if (*tmpfile) unlink(tmpfile); return rc; } /** * nntp_newsrc_update - Update .newsrc file * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ int nntp_newsrc_update(struct NntpServer *nserv) { char *buf = NULL; size_t buflen, off; int rc = -1; if (!nserv) return -1; buflen = 10 * LONG_STRING; buf = mutt_mem_calloc(1, buflen); off = 0; /* we will generate full newsrc here */ for (unsigned int i = 0; i < nserv->groups_num; i++) { struct NntpData *nntp_data = nserv->groups_list[i]; if (!nntp_data || !nntp_data->newsrc_ent) continue; /* write newsgroup name */ if (off + strlen(nntp_data->group) + 3 > buflen) { buflen *= 2; mutt_mem_realloc(&buf, buflen); } snprintf(buf + off, buflen - off, "%s%c ", nntp_data->group, nntp_data->subscribed ? ':' : '!'); off += strlen(buf + off); /* write entries */ for (unsigned int j = 0; j < nntp_data->newsrc_len; j++) { if (off + LONG_STRING > buflen) { buflen *= 2; mutt_mem_realloc(&buf, buflen); } if (j) buf[off++] = ','; if (nntp_data->newsrc_ent[j].first == nntp_data->newsrc_ent[j].last) snprintf(buf + off, buflen - off, "%u", nntp_data->newsrc_ent[j].first); else if (nntp_data->newsrc_ent[j].first < nntp_data->newsrc_ent[j].last) { snprintf(buf + off, buflen - off, "%u-%u", nntp_data->newsrc_ent[j].first, nntp_data->newsrc_ent[j].last); } off += strlen(buf + off); } buf[off++] = '\n'; } buf[off] = '\0'; /* newsrc is being fully rewritten */ mutt_debug(1, "Updating %s\n", nserv->newsrc_file); if (nserv->newsrc_file && update_file(nserv->newsrc_file, buf) == 0) { struct stat sb; rc = stat(nserv->newsrc_file, &sb); if (rc == 0) { nserv->size = sb.st_size; nserv->mtime = sb.st_mtime; } else { mutt_perror(nserv->newsrc_file); } } FREE(&buf); return rc; } /** * cache_expand - Make fully qualified cache file name * @param dst Buffer for filename * @param dstlen Length of buffer * @param acct Account * @param src Path to add to the URL */ static void cache_expand(char *dst, size_t dstlen, struct Account *acct, char *src) { char *c = NULL; char file[PATH_MAX]; /* server subdirectory */ if (acct) { struct Url url; mutt_account_tourl(acct, &url); url.path = src; url_tostring(&url, file, sizeof(file), U_PATH); } else mutt_str_strfcpy(file, src ?
src : "", sizeof(file)); snprintf(dst, dstlen, "%s/%s", NewsCacheDir, file); /* remove trailing slash */ c = dst + strlen(dst) - 1; if (*c == '/') *c = '\0'; mutt_expand_path(dst, dstlen); mutt_encode_path(dst, dstlen, dst); } /** * nntp_expand_path - Make fully qualified url from newsgroup name * @param line String containing newsgroup name * @param len Length of string * @param acct Account to save result */ void nntp_expand_path(char *line, size_t len, struct Account *acct) { struct Url url; mutt_account_tourl(acct, &url); url.path = mutt_str_strdup(line); url_tostring(&url, line, len, 0); FREE(&url.path); } /** * nntp_add_group - Parse newsgroup * @param line String to parse * @param data NNTP data * @retval 0 Always */ int nntp_add_group(char *line, void *data) { struct NntpServer *nserv = data; struct NntpData *nntp_data = NULL; char group[LONG_STRING]; char desc[HUGE_STRING] = ""; char mod; anum_t first, last; if (!nserv || !line) return 0; if (sscanf(line, "%s " ANUM " " ANUM " %c %[^\n]", group, &last, &first, &mod, desc) < 4) return 0; nntp_data = nntp_data_find(nserv, group); nntp_data->deleted = false; nntp_data->first_message = first; nntp_data->last_message = last; nntp_data->allowed = (mod == 'y') || (mod == 'm'); mutt_str_replace(&nntp_data->desc, desc); if (nntp_data->newsrc_ent || nntp_data->last_cached) nntp_group_unread_stat(nntp_data); else if (nntp_data->last_message && nntp_data->first_message <= nntp_data->last_message) nntp_data->unread = nntp_data->last_message - nntp_data->first_message + 1; else nntp_data->unread = 0; return 0; } /** * active_get_cache - Load list of all newsgroups from cache * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ static int active_get_cache(struct NntpServer *nserv) { char buf[HUGE_STRING]; char file[PATH_MAX]; time_t t; cache_expand(file, sizeof(file), &nserv->conn->account, ".active"); mutt_debug(1, "Parsing %s\n", file); FILE *fp = mutt_file_fopen(file, "r"); if (!fp) return -1; if (fgets(buf, sizeof(buf), fp) == NULL || sscanf(buf, "%ld%s", &t, file) != 1 || t == 0) { mutt_file_fclose(&fp); return -1; } nserv->newgroups_time = t; mutt_message(_("Loading list of groups from cache...")); while (fgets(buf, sizeof(buf), fp)) nntp_add_group(buf, nserv); nntp_add_group(NULL, NULL); mutt_file_fclose(&fp); mutt_clear_error(); return 0; } /** * nntp_active_save_cache - Save list of all newsgroups to cache * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ int nntp_active_save_cache(struct NntpServer *nserv) { char file[PATH_MAX]; char *buf = NULL; size_t buflen, off; int rc; if (!nserv->cacheable) return 0; buflen = 10 * LONG_STRING; buf = mutt_mem_calloc(1, buflen); snprintf(buf, buflen, "%lu\n", (unsigned long) nserv->newgroups_time); off = strlen(buf); for (unsigned int i = 0; i < nserv->groups_num; i++) { struct NntpData *nntp_data = nserv->groups_list[i]; if (!nntp_data || nntp_data->deleted) continue; if (off + strlen(nntp_data->group) + (nntp_data->desc ? strlen(nntp_data->desc) : 0) + 50 > buflen) { buflen *= 2; mutt_mem_realloc(&buf, buflen); } snprintf(buf + off, buflen - off, "%s %u %u %c%s%s\n", nntp_data->group, nntp_data->last_message, nntp_data->first_message, nntp_data->allowed ? 'y' : 'n', nntp_data->desc ? " " : "", nntp_data->desc ? 
nntp_data->desc : ""); off += strlen(buf + off); } cache_expand(file, sizeof(file), &nserv->conn->account, ".active"); mutt_debug(1, "Updating %s\n", file); rc = update_file(file, buf); FREE(&buf); return rc; } #ifdef USE_HCACHE /** * nntp_hcache_namer - Compose hcache file names * @param path Path of message * @param dest Buffer for filename * @param destlen Length of buffer * @retval num Characters written to buffer * * Used by mutt_hcache_open() to compose hcache file name */ static int nntp_hcache_namer(const char *path, char *dest, size_t destlen) { return snprintf(dest, destlen, "%s.hcache", path); } /** * nntp_hcache_open - Open newsgroup hcache * @param nntp_data NNTP data * @retval ptr Header cache * @retval NULL Error */ header_cache_t *nntp_hcache_open(struct NntpData *nntp_data) { struct Url url; char file[PATH_MAX]; if (!nntp_data->nserv || !nntp_data->nserv->cacheable || !nntp_data->nserv->conn || !nntp_data->group || !(nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed)) { return NULL; } mutt_account_tourl(&nntp_data->nserv->conn->account, &url); url.path = nntp_data->group; url_tostring(&url, file, sizeof(file), U_PATH); return mutt_hcache_open(NewsCacheDir, file, nntp_hcache_namer); } /** * nntp_hcache_update - Remove stale cached headers * @param nntp_data NNTP data * @param hc Header cache */ void nntp_hcache_update(struct NntpData *nntp_data, header_cache_t *hc) { char buf[16]; bool old = false; void *hdata = NULL; anum_t first = 0, last = 0; if (!hc) return; /* fetch previous values of first and last */ hdata = mutt_hcache_fetch_raw(hc, "index", 5); if (hdata) { mutt_debug(2, "mutt_hcache_fetch index: %s\n", (char *) hdata); if (sscanf(hdata, ANUM " " ANUM, &first, &last) == 2) { old = true; nntp_data->last_cached = last; /* clean removed headers from cache */ for (anum_t current = first; current <= last; current++) { if (current >= nntp_data->first_message && current <= nntp_data->last_message) continue; snprintf(buf, sizeof(buf), "%u", current); mutt_debug(2, "mutt_hcache_delete %s\n", buf); mutt_hcache_delete(hc, buf, strlen(buf)); } } mutt_hcache_free(hc, &hdata); } /* store current values of first and last */ if (!old || nntp_data->first_message != first || nntp_data->last_message != last) { snprintf(buf, sizeof(buf), "%u %u", nntp_data->first_message, nntp_data->last_message); mutt_debug(2, "mutt_hcache_store index: %s\n", buf); mutt_hcache_store_raw(hc, "index", 5, buf, strlen(buf)); } } #endif /** * nntp_bcache_delete - Remove bcache file * @param id Body cache ID * @param bcache Body cache * @param data NNTP data * @retval 0 Always */ static int nntp_bcache_delete(const char *id, struct BodyCache *bcache, void *data) { struct NntpData *nntp_data = data; anum_t anum; char c; if (!nntp_data || sscanf(id, ANUM "%c", &anum, &c) != 1 || anum < nntp_data->first_message || anum > nntp_data->last_message) { if (nntp_data) mutt_debug(2, "mutt_bcache_del %s\n", id); mutt_bcache_del(bcache, id); } return 0; } /** * nntp_bcache_update - Remove stale cached messages * @param nntp_data NNTP data */ void nntp_bcache_update(struct NntpData *nntp_data) { mutt_bcache_list(nntp_data->bcache, nntp_bcache_delete, nntp_data); } /** * nntp_delete_group_cache - Remove hcache and bcache of newsgroup * @param nntp_data NNTP data */ void nntp_delete_group_cache(struct NntpData *nntp_data) { if (!nntp_data || !nntp_data->nserv || !nntp_data->nserv->cacheable) return; #ifdef USE_HCACHE char file[PATH_MAX]; nntp_hcache_namer(nntp_data->group, file, sizeof(file)); 
cache_expand(file, sizeof(file), &nntp_data->nserv->conn->account, file); unlink(file); nntp_data->last_cached = 0; mutt_debug(2, "%s\n", file); #endif if (!nntp_data->bcache) { nntp_data->bcache = mutt_bcache_open(&nntp_data->nserv->conn->account, nntp_data->group); } if (nntp_data->bcache) { mutt_debug(2, "%s/*\n", nntp_data->group); mutt_bcache_list(nntp_data->bcache, nntp_bcache_delete, NULL); mutt_bcache_close(&nntp_data->bcache); } } /** * nntp_clear_cache - Clear the NNTP cache * @param nserv NNTP server * * Remove hcache and bcache of all nonexistent and unsubscribed newsgroups */ void nntp_clear_cache(struct NntpServer *nserv) { char file[PATH_MAX]; char *fp = NULL; struct dirent *entry = NULL; DIR *dp = NULL; if (!nserv || !nserv->cacheable) return; cache_expand(file, sizeof(file), &nserv->conn->account, NULL); dp = opendir(file); if (dp) { mutt_str_strncat(file, sizeof(file), "/", 1); fp = file + strlen(file); while ((entry = readdir(dp))) { char *group = entry->d_name; struct stat sb; struct NntpData *nntp_data = NULL; struct NntpData nntp_tmp; if ((mutt_str_strcmp(group, ".") == 0) || (mutt_str_strcmp(group, "..") == 0)) continue; *fp = '\0'; mutt_str_strncat(file, sizeof(file), group, strlen(group)); if (stat(file, &sb)) continue; #ifdef USE_HCACHE if (S_ISREG(sb.st_mode)) { char *ext = group + strlen(group) - 7; if (strlen(group) < 8 || (mutt_str_strcmp(ext, ".hcache") != 0)) continue; *ext = '\0'; } else #endif if (!S_ISDIR(sb.st_mode)) continue; nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) { nntp_data = &nntp_tmp; nntp_data->nserv = nserv; nntp_data->group = group; nntp_data->bcache = NULL; } else if (nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed) continue; nntp_delete_group_cache(nntp_data); if (S_ISDIR(sb.st_mode)) { rmdir(file); mutt_debug(2, "%s\n", file); } } closedir(dp); } } /** * nntp_format_str - Expand the newsrc filename * @param[out] buf Buffer in which to save string * @param[in] buflen Buffer length * @param[in] col Starting column * @param[in] cols Number of screen columns * @param[in] op printf-like operator, e.g. 't' * @param[in] src printf-like format string * @param[in] prec Field precision, e.g. "-3.4" * @param[in] if_str If condition is met, display this string * @param[in] else_str Otherwise, display this string * @param[in] data Pointer to the mailbox Context * @param[in] flags Format flags * @retval src (unchanged) * * nntp_format_str() is a callback function for mutt_expando_format().
* * | Expando | Description * |:--------|:-------------------------------------------------------- * | \%a | Account url * | \%p | Port * | \%P | Port if specified * | \%s | News server name * | \%S | Url scheme * | \%u | Username */ const char *nntp_format_str(char *buf, size_t buflen, size_t col, int cols, char op, const char *src, const char *prec, const char *if_str, const char *else_str, unsigned long data, enum FormatFlag flags) { struct NntpServer *nserv = (struct NntpServer *) data; struct Account *acct = &nserv->conn->account; struct Url url; char fn[SHORT_STRING], fmt[SHORT_STRING], *p = NULL; switch (op) { case 'a': mutt_account_tourl(acct, &url); url_tostring(&url, fn, sizeof(fn), U_PATH); p = strchr(fn, '/'); if (p) *p = '\0'; snprintf(fmt, sizeof(fmt), "%%%ss", prec); snprintf(buf, buflen, fmt, fn); break; case 'p': snprintf(fmt, sizeof(fmt), "%%%su", prec); snprintf(buf, buflen, fmt, acct->port); break; case 'P': *buf = '\0'; if (acct->flags & MUTT_ACCT_PORT) { snprintf(fmt, sizeof(fmt), "%%%su", prec); snprintf(buf, buflen, fmt, acct->port); } break; case 's': strncpy(fn, acct->host, sizeof(fn) - 1); mutt_str_strlower(fn); snprintf(fmt, sizeof(fmt), "%%%ss", prec); snprintf(buf, buflen, fmt, fn); break; case 'S': mutt_account_tourl(acct, &url); url_tostring(&url, fn, sizeof(fn), U_PATH); p = strchr(fn, ':'); if (p) *p = '\0'; snprintf(fmt, sizeof(fmt), "%%%ss", prec); snprintf(buf, buflen, fmt, fn); break; case 'u': snprintf(fmt, sizeof(fmt), "%%%ss", prec); snprintf(buf, buflen, fmt, acct->user); break; } return src; } /** * nntp_select_server - Open a connection to an NNTP server * @param server Server URI * @param leave_lock Leave the server locked? * @retval ptr NNTP server * @retval NULL Error * * Automatically loads a newsrc into memory, if necessary. Checks the * size/mtime of the newsrc file; if it doesn't match, the file is loaded again. Hmm, if a * system has broken mtimes, this might mean the file is reloaded every time, * which we'd have to fix. */ struct NntpServer *nntp_select_server(char *server, bool leave_lock) { char file[PATH_MAX]; #ifdef USE_HCACHE char *p = NULL; #endif int rc; struct Account acct; struct NntpServer *nserv = NULL; struct NntpData *nntp_data = NULL; struct Connection *conn = NULL; struct Url url; if (!server || !*server) { mutt_error(_("No news server defined!")); return NULL; } /* create account from news server url */ acct.flags = 0; acct.port = NNTP_PORT; acct.type = MUTT_ACCT_TYPE_NNTP; snprintf(file, sizeof(file), "%s%s", strstr(server, "://") ?
"" : "news://", server); if (url_parse(&url, file) < 0 || (url.path && *url.path) || !(url.scheme == U_NNTP || url.scheme == U_NNTPS) || !url.host || mutt_account_fromurl(&acct, &url) < 0) { url_free(&url); mutt_error(_("%s is an invalid news server specification!"), server); return NULL; } if (url.scheme == U_NNTPS) { acct.flags |= MUTT_ACCT_SSL; acct.port = NNTP_SSL_PORT; } url_free(&url); /* find connection by account */ conn = mutt_conn_find(NULL, &acct); if (!conn) return NULL; if (!(conn->account.flags & MUTT_ACCT_USER) && acct.flags & MUTT_ACCT_USER) { conn->account.flags |= MUTT_ACCT_USER; conn->account.user[0] = '\0'; } /* news server already exists */ nserv = conn->data; if (nserv) { if (nserv->status == NNTP_BYE) nserv->status = NNTP_NONE; if (nntp_open_connection(nserv) < 0) return NULL; rc = nntp_newsrc_parse(nserv); if (rc < 0) return NULL; /* check for new newsgroups */ if (!leave_lock && nntp_check_new_groups(nserv) < 0) rc = -1; /* .newsrc has been externally modified */ if (rc > 0) nntp_clear_cache(nserv); if (rc < 0 || !leave_lock) nntp_newsrc_close(nserv); return (rc < 0) ? NULL : nserv; } /* new news server */ nserv = mutt_mem_calloc(1, sizeof(struct NntpServer)); nserv->conn = conn; nserv->groups_hash = mutt_hash_create(1009, 0); mutt_hash_set_destructor(nserv->groups_hash, nntp_hash_destructor, 0); nserv->groups_max = 16; nserv->groups_list = mutt_mem_malloc(nserv->groups_max * sizeof(nntp_data)); rc = nntp_open_connection(nserv); /* try to create cache directory and enable caching */ nserv->cacheable = false; if (rc >= 0 && NewsCacheDir && *NewsCacheDir) { cache_expand(file, sizeof(file), &conn->account, NULL); if (mutt_file_mkdir(file, S_IRWXU) < 0) { mutt_error(_("Can't create %s: %s."), file, strerror(errno)); } nserv->cacheable = true; } /* load .newsrc */ if (rc >= 0) { mutt_expando_format(file, sizeof(file), 0, MuttIndexWindow->cols, NONULL(Newsrc), nntp_format_str, (unsigned long) nserv, 0); mutt_expand_path(file, sizeof(file)); nserv->newsrc_file = mutt_str_strdup(file); rc = nntp_newsrc_parse(nserv); } if (rc >= 0) { /* try to load list of newsgroups from cache */ if (nserv->cacheable && active_get_cache(nserv) == 0) rc = nntp_check_new_groups(nserv); /* load list of newsgroups from server */ else rc = nntp_active_fetch(nserv, false); } if (rc >= 0) nntp_clear_cache(nserv); #ifdef USE_HCACHE /* check cache files */ if (rc >= 0 && nserv->cacheable) { struct dirent *entry = NULL; DIR *dp = opendir(file); if (dp) { while ((entry = readdir(dp))) { header_cache_t *hc = NULL; void *hdata = NULL; char *group = entry->d_name; p = group + strlen(group) - 7; if (strlen(group) < 8 || (strcmp(p, ".hcache") != 0)) continue; *p = '\0'; nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) continue; hc = nntp_hcache_open(nntp_data); if (!hc) continue; /* fetch previous values of first and last */ hdata = mutt_hcache_fetch_raw(hc, "index", 5); if (hdata) { anum_t first, last; if (sscanf(hdata, ANUM " " ANUM, &first, &last) == 2) { if (nntp_data->deleted) { nntp_data->first_message = first; nntp_data->last_message = last; } if (last >= nntp_data->first_message && last <= nntp_data->last_message) { nntp_data->last_cached = last; mutt_debug(2, "%s last_cached=%u\n", nntp_data->group, last); } } mutt_hcache_free(hc, &hdata); } mutt_hcache_close(hc); } closedir(dp); } } #endif if (rc < 0 || !leave_lock) nntp_newsrc_close(nserv); if (rc < 0) { mutt_hash_destroy(&nserv->groups_hash); FREE(&nserv->groups_list); FREE(&nserv->newsrc_file); 
FREE(&nserv->authenticators); FREE(&nserv); mutt_socket_close(conn); mutt_socket_free(conn); return NULL; } conn->data = nserv; return nserv; } /** * nntp_article_status - Get status of articles from .newsrc * @param ctx Mailbox * @param hdr Email Header * @param group Newsgroup * @param anum Article number * * Full status flags are not supported by nntp, but we can fake some of them: * Read = a read message number is in the .newsrc * New = not read and not cached * Old = not read but cached */ void nntp_article_status(struct Context *ctx, struct Header *hdr, char *group, anum_t anum) { struct NntpData *nntp_data = ctx->data; if (group) nntp_data = mutt_hash_find(nntp_data->nserv->groups_hash, group); if (!nntp_data) return; for (unsigned int i = 0; i < nntp_data->newsrc_len; i++) { if ((anum >= nntp_data->newsrc_ent[i].first) && (anum <= nntp_data->newsrc_ent[i].last)) { /* can't use mutt_set_flag() because mx_update_context() hasn't been called yet */ hdr->read = true; return; } } /* article was not cached yet, it's new */ if (anum > nntp_data->last_cached) return; /* article isn't read but cached, it's old */ if (MarkOld) hdr->old = true; } /** * mutt_newsgroup_subscribe - Subscribe newsgroup * @param nserv NNTP server * @param group Newsgroup * @retval ptr NNTP data * @retval NULL Error */ struct NntpData *mutt_newsgroup_subscribe(struct NntpServer *nserv, char *group) { struct NntpData *nntp_data = NULL; if (!nserv || !nserv->groups_hash || !group || !*group) return NULL; nntp_data = nntp_data_find(nserv, group); nntp_data->subscribed = true; if (!nntp_data->newsrc_ent) { nntp_data->newsrc_ent = mutt_mem_calloc(1, sizeof(struct NewsrcEntry)); nntp_data->newsrc_len = 1; nntp_data->newsrc_ent[0].first = 1; nntp_data->newsrc_ent[0].last = 0; } return nntp_data; } /** * mutt_newsgroup_unsubscribe - Unsubscribe newsgroup * @param nserv NNTP server * @param group Newsgroup * @retval ptr NNTP data * @retval NULL Error */ struct NntpData *mutt_newsgroup_unsubscribe(struct NntpServer *nserv, char *group) { struct NntpData *nntp_data = NULL; if (!nserv || !nserv->groups_hash || !group || !*group) return NULL; nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) return NULL; nntp_data->subscribed = false; if (!SaveUnsubscribed) { nntp_data->newsrc_len = 0; FREE(&nntp_data->newsrc_ent); } return nntp_data; } /** * mutt_newsgroup_catchup - Catchup newsgroup * @param nserv NNTP server * @param group Newsgroup * @retval ptr NNTP data * @retval NULL Error */ struct NntpData *mutt_newsgroup_catchup(struct NntpServer *nserv, char *group) { struct NntpData *nntp_data = NULL; if (!nserv || !nserv->groups_hash || !group || !*group) return NULL; nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) return NULL; if (nntp_data->newsrc_ent) { mutt_mem_realloc(&nntp_data->newsrc_ent, sizeof(struct NewsrcEntry)); nntp_data->newsrc_len = 1; nntp_data->newsrc_ent[0].first = 1; nntp_data->newsrc_ent[0].last = nntp_data->last_message; } nntp_data->unread = 0; if (Context && Context->data == nntp_data) { for (unsigned int i = 0; i < Context->msgcount; i++) mutt_set_flag(Context, Context->hdrs[i], MUTT_READ, 1); } return nntp_data; } /** * mutt_newsgroup_uncatchup - Uncatchup newsgroup * @param nserv NNTP server * @param group Newsgroup * @retval ptr NNTP data * @retval NULL Error */ struct NntpData *mutt_newsgroup_uncatchup(struct NntpServer *nserv, char *group) { struct NntpData *nntp_data = NULL; if (!nserv || !nserv->groups_hash || !group || !*group) return NULL; nntp_data =
mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) return NULL; if (nntp_data->newsrc_ent) { mutt_mem_realloc(&nntp_data->newsrc_ent, sizeof(struct NewsrcEntry)); nntp_data->newsrc_len = 1; nntp_data->newsrc_ent[0].first = 1; nntp_data->newsrc_ent[0].last = nntp_data->first_message - 1; } if (Context && Context->data == nntp_data) { nntp_data->unread = Context->msgcount; for (unsigned int i = 0; i < Context->msgcount; i++) mutt_set_flag(Context, Context->hdrs[i], MUTT_READ, 0); } else { nntp_data->unread = nntp_data->last_message; if (nntp_data->newsrc_ent) nntp_data->unread -= nntp_data->newsrc_ent[0].last; } return nntp_data; } /** * nntp_buffy - Get first newsgroup with new messages * @param buf Buffer for result * @param len Length of buffer */ void nntp_buffy(char *buf, size_t len) { for (unsigned int i = 0; i < CurrentNewsSrv->groups_num; i++) { struct NntpData *nntp_data = CurrentNewsSrv->groups_list[i]; if (!nntp_data || !nntp_data->subscribed || !nntp_data->unread) continue; if (Context && Context->magic == MUTT_NNTP && (mutt_str_strcmp(nntp_data->group, ((struct NntpData *) Context->data)->group) == 0)) { unsigned int unread = 0; for (unsigned int j = 0; j < Context->msgcount; j++) if (!Context->hdrs[j]->read && !Context->hdrs[j]->deleted) unread++; if (!unread) continue; } mutt_str_strfcpy(buf, nntp_data->group, len); break; } }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_253_0
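nntp_add_group() above parses a server-supplied LIST line with unbounded "%s" and "%[^\n]" conversions into fixed-size stack buffers, so an overlong group name or description can write past group[LONG_STRING] or desc[HUGE_STRING]. Below is a minimal standalone sketch of the width-limited variant, not the upstream fix itself: parse_group_line() is an illustrative name, the 1024/8192 sizes are assumptions standing in for LONG_STRING and HUGE_STRING, and plain %u stands in for the ANUM format.

#include <stdio.h>

#define GROUP_LEN 1024 /* assumed value of LONG_STRING */
#define DESC_LEN 8192  /* assumed value of HUGE_STRING */

/* Parse one LIST line: "<group> <last> <first> <mod> <description>".
 * The width specifiers (buffer size minus one, leaving room for the
 * terminator) stop sscanf from writing past the fixed-size buffers. */
static int parse_group_line(const char *line)
{
    char group[GROUP_LEN];
    char desc[DESC_LEN] = "";
    char mod;
    unsigned int first, last; /* %u stands in for ANUM/anum_t */

    if (!line)
        return -1;
    if (sscanf(line, "%1023s %u %u %c %8191[^\n]", group, &last, &first, &mod, desc) < 4)
        return -1;
    printf("group=%s first=%u last=%u mod=%c desc=%s\n", group, first, last, mod, desc);
    return 0;
}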
crossvul-cpp_data_bad_2483_2
/* * Instruction-patching support. * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/init.h> #include <linux/string.h> #include <asm/patch.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/system.h> #include <asm/unistd.h> /* * This was adapted from code written by Tony Luck: * * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle * like this: * * 6 6 5 4 3 2 1 * 3210987654321098765432109876543210987654321098765432109876543210 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG * * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB */ static u64 get_imm64 (u64 insn_addr) { u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */ return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/ ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/ ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/ ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/ ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/ ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/ ((p[1] & 0x000007f000000000UL) >> 36); /*G*/ } /* Patch instruction with "val" where "mask" has 1 bits. */ void ia64_patch (u64 insn_addr, u64 mask, u64 val) { u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16); # define insn_mask ((1UL << 41) - 1) unsigned long shift; b0 = b[0]; b1 = b[1]; shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */ if (shift >= 64) { m1 = mask << (shift - 64); v1 = val << (shift - 64); } else { m0 = mask << shift; m1 = mask >> (64 - shift); v0 = val << shift; v1 = val >> (64 - shift); b[0] = (b0 & ~m0) | (v0 & m0); } b[1] = (b1 & ~m1) | (v1 & m1); } void ia64_patch_imm64 (u64 insn_addr, u64 val) { /* The assembler may generate offset pointing to either slot 1 or slot 2 for a long (2-slot) instruction, occupying slots 1 and 2. */ insn_addr &= -16UL; ia64_patch(insn_addr + 2, 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); } void ia64_patch_imm60 (u64 insn_addr, u64 val) { /* The assembler may generate offset pointing to either slot 1 or slot 2 for a long (2-slot) instruction, occupying slots 1 and 2. */ insn_addr &= -16UL; ia64_patch(insn_addr + 2, 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18); } /* * We sometimes need to load the physical address of a kernel * object. Often we can convert the virtual address to physical * at execution time, but sometimes (either for performance reasons * or during error recovery) we cannot do this. Patch the marked * bundles to load the physical address.
*/ void __init ia64_patch_vtop (unsigned long start, unsigned long end) { s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) offp + *offp; /* replace virtual address with corresponding physical address: */ ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip))); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } void __init ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) { static int first_time = 1; int need_workaround; s32 *offp = (s32 *) start; u64 *wp; need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0); if (first_time) { first_time = 0; if (need_workaround) printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n"); } if (need_workaround) return; while (offp < (s32 *) end) { wp = (u64 *) ia64_imva((char *) offp + *offp); wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ wp[1] = 0x0084006880000200UL; wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ wp[3] = 0x0004000000000200UL; ia64_fc(wp); ia64_fc(wp + 2); ++offp; } ia64_sync_i(); ia64_srlz_i(); } static void __init patch_fsyscall_table (unsigned long start, unsigned long end) { extern unsigned long fsyscall_table[NR_syscalls]; s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) ia64_imva((char *) offp + *offp); ia64_patch_imm64(ip, (u64) fsyscall_table); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } static void __init patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) { extern char fsys_bubble_down[]; s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) offp + *offp; ia64_patch_imm60((u64) ia64_imva((void *) ip), (u64) (fsys_bubble_down - (ip & -16)) / 16); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } void __init ia64_patch_gate (void) { # define START(name) ((unsigned long) __start_gate_##name##_patchlist) # define END(name) ((unsigned long)__end_gate_##name##_patchlist) patch_fsyscall_table(START(fsyscall), END(fsyscall)); patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); ia64_patch_vtop(START(vtop), END(vtop)); ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); } void ia64_patch_phys_stack_reg(unsigned long val) { s32 * offp = (s32 *) __start___phys_stack_reg_patchlist; s32 * end = (s32 *) __end___phys_stack_reg_patchlist; u64 ip, mask, imm; /* see instruction format A4: adds r1 = imm13, r3 */ mask = (0x3fUL << 27) | (0x7f << 13); imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13; while (offp < end) { ip = (u64) offp + *offp; ia64_patch(ip, mask, imm); ia64_fc(ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); }
./CrossVul/dataset_final_sorted/CWE-119/c/bad_2483_2
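ia64_patch() above splices a 41-bit instruction image into a 128-bit bundle held as two 64-bit words, splitting the mask and value across the word boundary whenever the slot's bit offset pushes past bit 63. Below is a host-side sketch of just that shift/mask arithmetic, runnable off-target; it is not kernel code (no bundle templates, no i-cache flushing), and the shift==0 guard is extra caution for the generic case, since real slot offsets start at 5.

#include <inttypes.h>
#include <stdio.h>

/* Write `val` under `mask` at bit offset `shift` into a 128-bit field
 * stored as two little-endian 64-bit words, mirroring the shift>=64 /
 * shift<64 split in ia64_patch(). */
static void patch_bits(uint64_t b[2], unsigned int shift, uint64_t mask, uint64_t val)
{
    if (shift >= 64) {
        uint64_t m1 = mask << (shift - 64);
        uint64_t v1 = val << (shift - 64);
        b[1] = (b[1] & ~m1) | (v1 & m1);
    } else {
        uint64_t m0 = mask << shift;
        uint64_t m1 = shift ? mask >> (64 - shift) : 0; /* avoid UB when shift == 0 */
        uint64_t v0 = val << shift;
        uint64_t v1 = shift ? val >> (64 - shift) : 0;
        b[0] = (b[0] & ~m0) | (v0 & m0);
        b[1] = (b[1] & ~m1) | (v1 & m1);
    }
}

int main(void)
{
    uint64_t bundle[2] = { 0, 0 };
    /* slot 1 of a bundle starts at bit 5 + 41 = 46, as in ia64_patch() */
    patch_bits(bundle, 5 + 41 * 1, (UINT64_C(1) << 41) - 1, UINT64_C(0x123456789ab));
    printf("%016" PRIx64 " %016" PRIx64 "\n", bundle[1], bundle[0]);
    return 0;
}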
crossvul-cpp_data_good_3674_0
/* Task credentials management - see Documentation/security/credentials.txt * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/export.h> #include <linux/cred.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/init_task.h> #include <linux/security.h> #include <linux/binfmts.h> #include <linux/cn_proc.h> #if 0 #define kdebug(FMT, ...) \ printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #else #define kdebug(FMT, ...) \ no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__) #endif static struct kmem_cache *cred_jar; /* * The common credentials for the initial task's thread group */ #ifdef CONFIG_KEYS static struct thread_group_cred init_tgcred = { .usage = ATOMIC_INIT(2), .tgid = 0, .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock), }; #endif /* * The initial credentials for the initial task */ struct cred init_cred = { .usage = ATOMIC_INIT(4), #ifdef CONFIG_DEBUG_CREDENTIALS .subscribers = ATOMIC_INIT(2), .magic = CRED_MAGIC, #endif .securebits = SECUREBITS_DEFAULT, .cap_inheritable = CAP_EMPTY_SET, .cap_permitted = CAP_FULL_SET, .cap_effective = CAP_FULL_SET, .cap_bset = CAP_FULL_SET, .user = INIT_USER, .user_ns = &init_user_ns, .group_info = &init_groups, #ifdef CONFIG_KEYS .tgcred = &init_tgcred, #endif }; static inline void set_cred_subscribers(struct cred *cred, int n) { #ifdef CONFIG_DEBUG_CREDENTIALS atomic_set(&cred->subscribers, n); #endif } static inline int read_cred_subscribers(const struct cred *cred) { #ifdef CONFIG_DEBUG_CREDENTIALS return atomic_read(&cred->subscribers); #else return 0; #endif } static inline void alter_cred_subscribers(const struct cred *_cred, int n) { #ifdef CONFIG_DEBUG_CREDENTIALS struct cred *cred = (struct cred *) _cred; atomic_add(n, &cred->subscribers); #endif } /* * Dispose of the shared task group credentials */ #ifdef CONFIG_KEYS static void release_tgcred_rcu(struct rcu_head *rcu) { struct thread_group_cred *tgcred = container_of(rcu, struct thread_group_cred, rcu); BUG_ON(atomic_read(&tgcred->usage) != 0); key_put(tgcred->session_keyring); key_put(tgcred->process_keyring); kfree(tgcred); } #endif /* * Release a set of thread group credentials. 
*/ static void release_tgcred(struct cred *cred) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred = cred->tgcred; if (atomic_dec_and_test(&tgcred->usage)) call_rcu(&tgcred->rcu, release_tgcred_rcu); #endif } /* * The RCU callback to actually dispose of a set of credentials */ static void put_cred_rcu(struct rcu_head *rcu) { struct cred *cred = container_of(rcu, struct cred, rcu); kdebug("put_cred_rcu(%p)", cred); #ifdef CONFIG_DEBUG_CREDENTIALS if (cred->magic != CRED_MAGIC_DEAD || atomic_read(&cred->usage) != 0 || read_cred_subscribers(cred) != 0) panic("CRED: put_cred_rcu() sees %p with" " mag %x, put %p, usage %d, subscr %d\n", cred, cred->magic, cred->put_addr, atomic_read(&cred->usage), read_cred_subscribers(cred)); #else if (atomic_read(&cred->usage) != 0) panic("CRED: put_cred_rcu() sees %p with usage %d\n", cred, atomic_read(&cred->usage)); #endif security_cred_free(cred); key_put(cred->thread_keyring); key_put(cred->request_key_auth); release_tgcred(cred); if (cred->group_info) put_group_info(cred->group_info); free_uid(cred->user); kmem_cache_free(cred_jar, cred); } /** * __put_cred - Destroy a set of credentials * @cred: The record to release * * Destroy a set of credentials on which no references remain. */ void __put_cred(struct cred *cred) { kdebug("__put_cred(%p{%d,%d})", cred, atomic_read(&cred->usage), read_cred_subscribers(cred)); BUG_ON(atomic_read(&cred->usage) != 0); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(cred) != 0); cred->magic = CRED_MAGIC_DEAD; cred->put_addr = __builtin_return_address(0); #endif BUG_ON(cred == current->cred); BUG_ON(cred == current->real_cred); call_rcu(&cred->rcu, put_cred_rcu); } EXPORT_SYMBOL(__put_cred); /* * Clean up a task's credentials when it exits */ void exit_creds(struct task_struct *tsk) { struct cred *cred; kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred, atomic_read(&tsk->cred->usage), read_cred_subscribers(tsk->cred)); cred = (struct cred *) tsk->real_cred; tsk->real_cred = NULL; validate_creds(cred); alter_cred_subscribers(cred, -1); put_cred(cred); cred = (struct cred *) tsk->cred; tsk->cred = NULL; validate_creds(cred); alter_cred_subscribers(cred, -1); put_cred(cred); cred = (struct cred *) tsk->replacement_session_keyring; if (cred) { tsk->replacement_session_keyring = NULL; validate_creds(cred); put_cred(cred); } } /** * get_task_cred - Get another task's objective credentials * @task: The task to query * * Get the objective credentials of a task, pinning them so that they can't go * away. Accessing a task's credentials directly is not permitted. * * The caller must also make sure task doesn't get deleted, either by holding a * ref on task or by holding tasklist_lock to prevent it from being unlinked. */ const struct cred *get_task_cred(struct task_struct *task) { const struct cred *cred; rcu_read_lock(); do { cred = __task_cred((task)); BUG_ON(!cred); } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage)); rcu_read_unlock(); return cred; } /* * Allocate blank credentials, such that the credentials can be filled in at a * later date without risk of ENOMEM. 
*/ struct cred *cred_alloc_blank(void) { struct cred *new; new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); if (!new) return NULL; #ifdef CONFIG_KEYS new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL); if (!new->tgcred) { kmem_cache_free(cred_jar, new); return NULL; } atomic_set(&new->tgcred->usage, 1); #endif atomic_set(&new->usage, 1); #ifdef CONFIG_DEBUG_CREDENTIALS new->magic = CRED_MAGIC; #endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; return new; error: abort_creds(new); return NULL; } /** * prepare_creds - Prepare a new set of credentials for modification * * Prepare a new set of task credentials for modification. A task's creds * shouldn't generally be modified directly, therefore this function is used to * prepare a new copy, which the caller then modifies and then commits by * calling commit_creds(). * * Preparation involves making a copy of the objective creds for modification. * * Returns a pointer to the new creds-to-be if successful, NULL otherwise. * * Call commit_creds() or abort_creds() to clean up. */ struct cred *prepare_creds(void) { struct task_struct *task = current; const struct cred *old; struct cred *new; validate_process_creds(); new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; kdebug("prepare_creds() alloc %p", new); old = task->cred; memcpy(new, old, sizeof(struct cred)); atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_group_info(new->group_info); get_uid(new->user); #ifdef CONFIG_KEYS key_get(new->thread_keyring); key_get(new->request_key_auth); atomic_inc(&new->tgcred->usage); #endif #ifdef CONFIG_SECURITY new->security = NULL; #endif if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; validate_creds(new); return new; error: abort_creds(new); return NULL; } EXPORT_SYMBOL(prepare_creds); /* * Prepare credentials for current to perform an execve() * - The caller must hold ->cred_guard_mutex */ struct cred *prepare_exec_creds(void) { struct thread_group_cred *tgcred = NULL; struct cred *new; #ifdef CONFIG_KEYS tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) return NULL; #endif new = prepare_creds(); if (!new) { kfree(tgcred); return new; } #ifdef CONFIG_KEYS /* newly exec'd tasks don't get a thread keyring */ key_put(new->thread_keyring); new->thread_keyring = NULL; /* create a new per-thread-group creds for all this set of threads to * share */ memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); /* inherit the session keyring; new process keyring */ key_get(tgcred->session_keyring); tgcred->process_keyring = NULL; release_tgcred(new); new->tgcred = tgcred; #endif return new; } /* * Copy credentials for the new process created by fork() * * We share if we can, but under some circumstances we have to generate a new * set. 
* * The new process gets the current process's subjective credentials as its * objective and subjective credentials */ int copy_creds(struct task_struct *p, unsigned long clone_flags) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred; #endif struct cred *new; int ret; p->replacement_session_keyring = NULL; if ( #ifdef CONFIG_KEYS !p->cred->thread_keyring && #endif clone_flags & CLONE_THREAD ) { p->real_cred = get_cred(p->cred); get_cred(p->cred); alter_cred_subscribers(p->cred, 2); kdebug("share_creds(%p{%d,%d})", p->cred, atomic_read(&p->cred->usage), read_cred_subscribers(p->cred)); atomic_inc(&p->cred->user->processes); return 0; } new = prepare_creds(); if (!new) return -ENOMEM; if (clone_flags & CLONE_NEWUSER) { ret = create_user_ns(new); if (ret < 0) goto error_put; } /* cache user_ns in cred. Doesn't need a refcount because it will * stay pinned by cred->user */ new->user_ns = new->user->user_ns; #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ if (new->thread_keyring) { key_put(new->thread_keyring); new->thread_keyring = NULL; if (clone_flags & CLONE_THREAD) install_thread_keyring_to_cred(new); } /* we share the process and session keyrings between all the threads in * a process - this is slightly icky as we violate COW credentials a * bit */ if (!(clone_flags & CLONE_THREAD)) { tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) { ret = -ENOMEM; goto error_put; } atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); tgcred->process_keyring = NULL; tgcred->session_keyring = key_get(new->tgcred->session_keyring); release_tgcred(new); new->tgcred = tgcred; } #endif atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); alter_cred_subscribers(new, 2); validate_creds(new); return 0; error_put: put_cred(new); return ret; } /** * commit_creds - Install new credentials upon the current task * @new: The credentials to be assigned * * Install a new set of credentials to the current task, using RCU to replace * the old set. Both the objective and the subjective credentials pointers are * updated. This function may not be called if the subjective credentials are * in an overridden state. * * This function eats the caller's reference to the new credentials. * * Always returns 0 thus allowing this function to be tail-called at the end * of, say, sys_setgid(). */ int commit_creds(struct cred *new) { struct task_struct *task = current; const struct cred *old = task->real_cred; kdebug("commit_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); BUG_ON(task->cred != old); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(old) < 2); validate_creds(old); validate_creds(new); #endif BUG_ON(atomic_read(&new->usage) < 1); get_cred(new); /* we will require a ref for the subj creds too */ /* dumpability changes */ if (old->euid != new->euid || old->egid != new->egid || old->fsuid != new->fsuid || old->fsgid != new->fsgid || !cap_issubset(new->cap_permitted, old->cap_permitted)) { if (task->mm) set_dumpable(task->mm, suid_dumpable); task->pdeath_signal = 0; smp_wmb(); } /* alter the thread keyring */ if (new->fsuid != old->fsuid) key_fsuid_changed(task); if (new->fsgid != old->fsgid) key_fsgid_changed(task); /* do it * RLIMIT_NPROC limits on user->processes have already been checked * in set_user(). 
*/ alter_cred_subscribers(new, 2); if (new->user != old->user) atomic_inc(&new->user->processes); rcu_assign_pointer(task->real_cred, new); rcu_assign_pointer(task->cred, new); if (new->user != old->user) atomic_dec(&old->user->processes); alter_cred_subscribers(old, -2); /* send notifications */ if (new->uid != old->uid || new->euid != old->euid || new->suid != old->suid || new->fsuid != old->fsuid) proc_id_connector(task, PROC_EVENT_UID); if (new->gid != old->gid || new->egid != old->egid || new->sgid != old->sgid || new->fsgid != old->fsgid) proc_id_connector(task, PROC_EVENT_GID); /* release the old obj and subj refs both */ put_cred(old); put_cred(old); return 0; } EXPORT_SYMBOL(commit_creds); /** * abort_creds - Discard a set of credentials and unlock the current task * @new: The credentials that were going to be applied * * Discard a set of credentials that were under construction and unlock the * current task. */ void abort_creds(struct cred *new) { kdebug("abort_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); #ifdef CONFIG_DEBUG_CREDENTIALS BUG_ON(read_cred_subscribers(new) != 0); #endif BUG_ON(atomic_read(&new->usage) < 1); put_cred(new); } EXPORT_SYMBOL(abort_creds); /** * override_creds - Override the current process's subjective credentials * @new: The credentials to be assigned * * Install a set of temporary override subjective credentials on the current * process, returning the old set for later reversion. */ const struct cred *override_creds(const struct cred *new) { const struct cred *old = current->cred; kdebug("override_creds(%p{%d,%d})", new, atomic_read(&new->usage), read_cred_subscribers(new)); validate_creds(old); validate_creds(new); get_cred(new); alter_cred_subscribers(new, 1); rcu_assign_pointer(current->cred, new); alter_cred_subscribers(old, -1); kdebug("override_creds() = %p{%d,%d}", old, atomic_read(&old->usage), read_cred_subscribers(old)); return old; } EXPORT_SYMBOL(override_creds); /** * revert_creds - Revert a temporary subjective credentials override * @old: The credentials to be restored * * Revert a temporary set of override subjective credentials to an old set, * discarding the override set. */ void revert_creds(const struct cred *old) { const struct cred *override = current->cred; kdebug("revert_creds(%p{%d,%d})", old, atomic_read(&old->usage), read_cred_subscribers(old)); validate_creds(old); validate_creds(override); alter_cred_subscribers(old, 1); rcu_assign_pointer(current->cred, old); alter_cred_subscribers(override, -1); put_cred(override); } EXPORT_SYMBOL(revert_creds); /* * initialise the credentials stuff */ void __init cred_init(void) { /* allocate a slab in which we can store credentials */ cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } /** * prepare_kernel_cred - Prepare a set of credentials for a kernel service * @daemon: A userspace daemon to be used as a reference * * Prepare a set of credentials for a kernel service. This can then be used to * override a task's own credentials so that work can be done on behalf of that * task that requires a different subjective context. * * @daemon is used to provide a base for the security record, but can be NULL. * If @daemon is supplied, then the security data will be derived from that; * otherwise they'll be set to 0 and no groups, full capabilities and no keys. * * The caller may change these controls afterwards if desired. * * Returns the new credentials or NULL if out of memory. 
* * Does not take, and does not return holding current->cred_replace_mutex. */ struct cred *prepare_kernel_cred(struct task_struct *daemon) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred; #endif const struct cred *old; struct cred *new; new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; #ifdef CONFIG_KEYS tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) { kmem_cache_free(cred_jar, new); return NULL; } #endif kdebug("prepare_kernel_cred() alloc %p", new); if (daemon) old = get_task_cred(daemon); else old = get_cred(&init_cred); validate_creds(old); *new = *old; atomic_set(&new->usage, 1); set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); #ifdef CONFIG_KEYS atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); tgcred->process_keyring = NULL; tgcred->session_keyring = NULL; new->tgcred = tgcred; new->request_key_auth = NULL; new->thread_keyring = NULL; new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; #endif #ifdef CONFIG_SECURITY new->security = NULL; #endif if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; put_cred(old); validate_creds(new); return new; error: put_cred(new); put_cred(old); return NULL; } EXPORT_SYMBOL(prepare_kernel_cred); /** * set_security_override - Set the security ID in a set of credentials * @new: The credentials to alter * @secid: The LSM security ID to set * * Set the LSM security ID in a set of credentials so that the subjective * security is overridden when an alternative set of credentials is used. */ int set_security_override(struct cred *new, u32 secid) { return security_kernel_act_as(new, secid); } EXPORT_SYMBOL(set_security_override); /** * set_security_override_from_ctx - Set the security ID in a set of credentials * @new: The credentials to alter * @secctx: The LSM security context to generate the security ID from. * * Set the LSM security ID in a set of credentials so that the subjective * security is overridden when an alternative set of credentials is used. The * security ID is specified in string form as a security context to be * interpreted by the LSM. */ int set_security_override_from_ctx(struct cred *new, const char *secctx) { u32 secid; int ret; ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); if (ret < 0) return ret; return set_security_override(new, secid); } EXPORT_SYMBOL(set_security_override_from_ctx); /** * set_create_files_as - Set the LSM file create context in a set of credentials * @new: The credentials to alter * @inode: The inode to take the context from * * Change the LSM file creation context in a set of credentials to be the same * as the object context of the specified inode, so that the new inodes have * the same MAC context as that inode. */ int set_create_files_as(struct cred *new, struct inode *inode) { new->fsuid = inode->i_uid; new->fsgid = inode->i_gid; return security_kernel_create_files_as(new, inode); } EXPORT_SYMBOL(set_create_files_as); #ifdef CONFIG_DEBUG_CREDENTIALS bool creds_are_invalid(const struct cred *cred) { if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX /* * cred->security == NULL if security_cred_alloc_blank() or * security_prepare_creds() returned an error. 
*/ if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)) return true; } #endif return false; } EXPORT_SYMBOL(creds_are_invalid); /* * dump invalid credentials */ static void dump_invalid_creds(const struct cred *cred, const char *label, const struct task_struct *tsk) { printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n", label, cred, cred == &init_cred ? "[init]" : "", cred == tsk->real_cred ? "[real]" : "", cred == tsk->cred ? "[eff]" : ""); printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n", cred->magic, cred->put_addr); printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n", atomic_read(&cred->usage), read_cred_subscribers(cred)); printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n", cred->uid, cred->euid, cred->suid, cred->fsuid); printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n", cred->gid, cred->egid, cred->sgid, cred->fsgid); #ifdef CONFIG_SECURITY printk(KERN_ERR "CRED: ->security is %p\n", cred->security); if ((unsigned long) cred->security >= PAGE_SIZE && (((unsigned long) cred->security & 0xffffff00) != (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))) printk(KERN_ERR "CRED: ->security {%x, %x}\n", ((u32*)cred->security)[0], ((u32*)cred->security)[1]); #endif } /* * report use of invalid credentials */ void __invalid_creds(const struct cred *cred, const char *file, unsigned line) { printk(KERN_ERR "CRED: Invalid credentials\n"); printk(KERN_ERR "CRED: At %s:%u\n", file, line); dump_invalid_creds(cred, "Specified", current); BUG(); } EXPORT_SYMBOL(__invalid_creds); /* * check the credentials on a process */ void __validate_process_creds(struct task_struct *tsk, const char *file, unsigned line) { if (tsk->cred == tsk->real_cred) { if (unlikely(read_cred_subscribers(tsk->cred) < 2 || creds_are_invalid(tsk->cred))) goto invalid_creds; } else { if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 || read_cred_subscribers(tsk->cred) < 1 || creds_are_invalid(tsk->real_cred) || creds_are_invalid(tsk->cred))) goto invalid_creds; } return; invalid_creds: printk(KERN_ERR "CRED: Invalid process credentials\n"); printk(KERN_ERR "CRED: At %s:%u\n", file, line); dump_invalid_creds(tsk->real_cred, "Real", tsk); if (tsk->cred != tsk->real_cred) dump_invalid_creds(tsk->cred, "Effective", tsk); else printk(KERN_ERR "CRED: Effective creds == Real creds\n"); BUG(); } EXPORT_SYMBOL(__validate_process_creds); /* * check creds for do_exit() */ void validate_creds_for_do_exit(struct task_struct *tsk) { kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})", tsk->real_cred, tsk->cred, atomic_read(&tsk->cred->usage), read_cred_subscribers(tsk->cred)); __validate_process_creds(tsk, __FILE__, __LINE__); } #endif /* CONFIG_DEBUG_CREDENTIALS */
./CrossVul/dataset_final_sorted/CWE-119/c/good_3674_0
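The two idioms this file exports pair up as sketched below: prepare_creds()/commit_creds() for a permanent change to the calling task, and override_creds()/revert_creds() for a temporary subjective switch. This is a sketch of how a caller of this era's API would use them, not code from the file: drop_fsuid_to() and act_as_cred() are hypothetical names, and uid_t here is the plain pre-namespace type this version of struct cred uses.

#include <linux/cred.h>
#include <linux/errno.h>

/* Permanently change the caller's filesystem UID: copy, edit, publish.
 * commit_creds() consumes the reference returned by prepare_creds(). */
static int drop_fsuid_to(uid_t fsuid)
{
    struct cred *new = prepare_creds();
    if (!new)
        return -ENOMEM;
    new->fsuid = fsuid;
    return commit_creds(new);
}

/* Temporarily act with someone else's credentials, then restore.
 * override_creds() takes its own reference on kcred and returns the old
 * subjective creds; revert_creds() puts the override set back. */
static void act_as_cred(const struct cred *kcred)
{
    const struct cred *old = override_creds(kcred);
    /* ... access files or keys on the other context's behalf ... */
    revert_creds(old);
}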